/* Copyright (c) 2014, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include <assert.h>
#include <limits.h>
#include <string.h>

#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/sha.h>
#include <openssl/type_check.h>

#include "../internal.h"
#include "internal.h"


typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an
   * explicit IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);

static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key));
  OPENSSL_free(tls_ctx);
  ctx->aead_state = NULL;
}

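/* aead_tls_init expects |key| to be laid out as the MAC key, followed by the
 * cipher key and, when |implicit_iv| is set, the IV, matching the assertion on
 * the combined length below. */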
static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);

  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  OPENSSL_memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  ctx->aead_state = tls_ctx;
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    ctx->aead_state = NULL;
    return 0;
  }
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}

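/* aead_tls_seal implements TLS record protection: it computes an HMAC over the
 * additional data and plaintext, appends the MAC to the plaintext, adds CBC
 * padding when the cipher's block size is greater than one, and encrypts the
 * result. */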
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    OPENSSL_memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}

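/* aead_tls_open decrypts the record, then strips and verifies the CBC padding
 * and MAC. For CBC ciphers, the padding and MAC checks are performed in
 * constant time. */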
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  unsigned padding_ok, data_plus_mac_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            (unsigned)HMAC_size(&tls_ctx->hmac_ctx))) {
      /* Publicly invalid. This can be rejected in non-constant time. */
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = ~0u;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  unsigned data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, if the padding is valid, the first |data_plus_mac_len|
   * bytes after |out| are the plaintext and MAC. Otherwise,
   * |data_plus_mac_len| is still large enough to extract a MAC, but it will be
   * irrelevant. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under
   * a different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= padding_ok;
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}

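/* The wrappers below bind aead_tls_init to a specific cipher, digest, and
 * implicit-IV setting. */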
static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}

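/* aead_tls_get_iv reports the cipher's current IV. It is only wired up for the
 * implicit-IV AEAD configurations below. */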
static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx);
  if (iv_len <= 1) {
    return 0;
  }

  *out_iv = tls_ctx->cipher_ctx.iv;
  *out_iv_len = iv_len;
  return 1;
}

static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t tag_len,
                                   enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_enc_null(),
                       EVP_sha1(), 1 /* implicit iv */);
}

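/* Each AEAD's key length below is the MAC key plus the cipher key and, for the
 * implicit-IV variants, the IV; the overhead is the maximum padding plus the
 * MAC. */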
static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
    SHA384_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_null_sha1_tls = {
    SHA_DIGEST_LENGTH, /* key len */
    0,                 /* nonce len */
    SHA_DIGEST_LENGTH, /* overhead (SHA1) */
    SHA_DIGEST_LENGTH, /* max tag length */
    NULL,              /* init */
    aead_null_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_null_sha1_tls(void) { return &aead_null_sha1_tls; }