/* Copyright (c) 2014, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include <assert.h>
#include <limits.h>
#include <string.h>

#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/sha.h>
#include <openssl/type_check.h>

#include "../crypto/internal.h"
#include "internal.h"


typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
   * IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);

static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key));
  OPENSSL_free(tls_ctx);
  ctx->aead_state = NULL;
}

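/* The key for each of these TLS "AEADs" is the concatenation of the MAC key,
 * the cipher key and, for |implicit_iv| ciphers (pre-TLS-1.1 CBC), the IV, in
 * that order. For example, AES-128-CBC-SHA1 with an implicit IV takes a
 * 20 + 16 + 16 byte key, matching |aead_aes_128_cbc_sha1_tls_implicit_iv|
 * below. */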
static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);
  /* Although EVP_rc4() is a variable-length cipher, the default key size is
   * correct for TLS. */

  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  ctx->aead_state = tls_ctx;
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    ctx->aead_state = NULL;
    return 0;
  }
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}

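/* aead_tls_seal implements the TLS MAC-then-encrypt construction: it MACs the
 * additional data and plaintext, then encrypts plaintext || MAC || padding
 * (padding only for block ciphers) with the record cipher. */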
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode, which changes the ciphertext length, |ad| doesn't
   * include the length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

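  /* The full TLS MAC input is the 13-byte record header: sequence number (8),
   * type (1), version (2) and length (2). The caller passes the first 11
   * bytes in |ad|; |ad_extra| supplies the two length bytes. */
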
  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

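  /* TLS CBC padding: append |padding_len| bytes, each of value
   * |padding_len - 1|, so the total is a multiple of the block size; at least
   * one byte is always added. For example, with a 16-byte block and a 20-byte
   * MAC, a 25-byte record gives 45 % 16 = 13, so padding_len = 3 and three
   * bytes of value 2 are appended for a 48-byte ciphertext. */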
  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}

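/* aead_tls_open reverses the construction above: it decrypts the record into
 * |out|, removes any CBC padding, recomputes the MAC over the additional data
 * and plaintext, and compares it with the record MAC in constant time before
 * reporting success or CIPHER_R_BAD_DECRYPT. */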
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and the
   * first |data_plus_mac_len| bytes after |out| are the plaintext and MAC.
   * Either way, |data_plus_mac_len| is large enough to extract a MAC. */

  /* To allow for CBC mode, which changes the ciphertext length, |ad| doesn't
   * include the length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under a
   * different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}

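/* The functions below are thin wrappers that bind a particular cipher and
 * digest to aead_tls_init. The |_implicit_iv| variants are for pre-TLS-1.1 CBC
 * ciphers, where the IV is carried in the key rather than the nonce. */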
static int aead_rc4_md5_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                 size_t key_len, size_t tag_len,
                                 enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_rc4(), EVP_md5(),
                       0);
}

static int aead_rc4_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                  size_t key_len, size_t tag_len,
                                  enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_rc4(), EVP_sha1(),
                       0);
}

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}

static int aead_rc4_tls_get_rc4_state(const EVP_AEAD_CTX *ctx,
                                      const RC4_KEY **out_key) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX*) ctx->aead_state;
  if (EVP_CIPHER_CTX_cipher(&tls_ctx->cipher_ctx) != EVP_rc4()) {
    return 0;
  }

  *out_key = (const RC4_KEY*) tls_ctx->cipher_ctx.cipher_data;
  return 1;
}

static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX*) ctx->aead_state;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx);
  if (iv_len <= 1) {
    return 0;
  }

  *out_iv = tls_ctx->cipher_ctx.iv;
  *out_iv_len = iv_len;
  return 1;
}

static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t tag_len,
                                   enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_enc_null(),
                       EVP_sha1(), 1 /* implicit iv */);
}

static const EVP_AEAD aead_rc4_md5_tls = {
    MD5_DIGEST_LENGTH + 16, /* key len (MD5 + RC4) */
    0,                      /* nonce len */
    MD5_DIGEST_LENGTH,      /* overhead */
    MD5_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_rc4_md5_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_rc4_tls_get_rc4_state, /* get_rc4_state */
    NULL,                       /* get_iv */
};

static const EVP_AEAD aead_rc4_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + RC4) */
    0,                      /* nonce len */
    SHA_DIGEST_LENGTH,      /* overhead */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_rc4_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_rc4_tls_get_rc4_state, /* get_rc4_state */
    NULL,                       /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                   /* get_rc4_state */
    NULL,                   /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL, /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                        /* get_rc4_state */
    aead_tls_get_iv,             /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                      /* get_rc4_state */
    NULL,                      /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                   /* get_rc4_state */
    NULL,                   /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL, /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                        /* get_rc4_state */
    aead_tls_get_iv,             /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_aes_256_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                      /* get_rc4_state */
    NULL,                      /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
    SHA384_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                      /* get_rc4_state */
    NULL,                      /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL, /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                   /* get_rc4_state */
    NULL,                   /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL, /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                       /* get_rc4_state */
    aead_tls_get_iv,            /* get_iv */
};

static const EVP_AEAD aead_null_sha1_tls = {
    SHA_DIGEST_LENGTH,          /* key len */
    0,                          /* nonce len */
    SHA_DIGEST_LENGTH,          /* overhead (SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_null_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL,                       /* get_rc4_state */
    NULL,                       /* get_iv */
};

const EVP_AEAD *EVP_aead_rc4_md5_tls(void) { return &aead_rc4_md5_tls; }

const EVP_AEAD *EVP_aead_rc4_sha1_tls(void) { return &aead_rc4_sha1_tls; }

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_null_sha1_tls(void) { return &aead_null_sha1_tls; }
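
/* A rough usage sketch (not part of this file): sealing one TLS record with
 * the AES-128-CBC-SHA1 AEAD. The exact declarations are in <openssl/aead.h>
 * and crypto/cipher/internal.h; |key|, |iv|, |plaintext| and |ad| are
 * caller-supplied buffers. The key is 20 + 16 bytes, the nonce is the 16-byte
 * explicit CBC IV and |ad| is the 11-byte TLS record header without the
 * length:
 *
 *   EVP_AEAD_CTX ctx;
 *   if (!EVP_AEAD_CTX_init_with_direction(
 *           &ctx, EVP_aead_aes_128_cbc_sha1_tls(), key, 20 + 16,
 *           EVP_AEAD_DEFAULT_TAG_LENGTH, evp_aead_seal)) {
 *     return 0;
 *   }
 *   uint8_t out[512];
 *   size_t out_len;
 *   if (!EVP_AEAD_CTX_seal(&ctx, out, &out_len, sizeof(out), iv, 16,
 *                          plaintext, plaintext_len, ad, 11)) {
 *     EVP_AEAD_CTX_cleanup(&ctx);
 *     return 0;
 *   }
 *   EVP_AEAD_CTX_cleanup(&ctx);
 */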