/* ====================================================================
 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ==================================================================== */

#include <openssl/modes.h>

#include <assert.h>
#include <string.h>

#include <openssl/mem.h>
#include <openssl/cpu.h>

#include "internal.h"
#include "../internal.h"


#if !defined(OPENSSL_NO_ASM) &&                         \
    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))
#define GHASH_ASM
#endif

#if defined(BSWAP4) && STRICT_ALIGNMENT == 1
/* redefine, because alignment is ensured */
#undef GETU32
#define GETU32(p) BSWAP4(*(const uint32_t *)(p))
#undef PUTU32
#define PUTU32(p, v) *(uint32_t *)(p) = BSWAP4(v)
#endif

#define PACK(s) ((size_t)(s) << (sizeof(size_t) * 8 - 16))
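/* REDUCE1BIT shifts the 128-bit value V right by one bit and, if a one bit
 * was shifted out, folds it back in by XORing the constant 0xe1 << 120,
 * which encodes the GCM reduction polynomial x^128 + x^7 + x^2 + x + 1. In
 * GCM's bit-reversed field representation this amounts to multiplying V by
 * x. */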
#define REDUCE1BIT(V)                                                  \
  do {                                                                 \
    if (sizeof(size_t) == 8) {                                         \
      uint64_t T = OPENSSL_U64(0xe100000000000000) & (0 - (V.lo & 1)); \
      V.lo = (V.hi << 63) | (V.lo >> 1);                               \
      V.hi = (V.hi >> 1) ^ T;                                          \
    } else {                                                           \
      uint32_t T = 0xe1000000U & (0 - (uint32_t)(V.lo & 1));           \
      V.lo = (V.hi << 63) | (V.lo >> 1);                               \
      V.hi = (V.hi >> 1) ^ ((uint64_t)T << 32);                        \
    }                                                                  \
  } while (0)


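/* gcm_init_4bit precomputes the 16-entry table used by the 4-bit (Shoup)
 * GHASH code below: Htable[i] holds H multiplied by the 4-bit value i (in
 * GCM's bit ordering), so gcm_gmult_4bit and gcm_ghash_4bit can process Xi
 * one nibble at a time with table lookups instead of full field
 * multiplications. */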
static void gcm_init_4bit(u128 Htable[16], uint64_t H[2]) {
  u128 V;

  Htable[0].hi = 0;
  Htable[0].lo = 0;
  V.hi = H[0];
  V.lo = H[1];

  Htable[8] = V;
  REDUCE1BIT(V);
  Htable[4] = V;
  REDUCE1BIT(V);
  Htable[2] = V;
  REDUCE1BIT(V);
  Htable[1] = V;
  Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
  V = Htable[4];
  Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
  Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
  Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
  V = Htable[8];
  Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
  Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
  Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
  Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
  Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
  Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
  Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;

#if defined(GHASH_ASM) && defined(OPENSSL_ARM)
  /* ARM assembler expects specific dword order in Htable. */
  {
    int j;
    const union {
      long one;
      char little;
    } is_endian = {1};

    if (is_endian.little) {
      for (j = 0; j < 16; ++j) {
        V = Htable[j];
        Htable[j].hi = V.lo;
        Htable[j].lo = V.hi;
      }
    } else {
      for (j = 0; j < 16; ++j) {
        V = Htable[j];
        Htable[j].hi = V.lo << 32 | V.lo >> 32;
        Htable[j].lo = V.hi << 32 | V.hi >> 32;
      }
    }
  }
#endif
}

#if !defined(GHASH_ASM) || defined(OPENSSL_AARCH64)
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)};

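/* gcm_gmult_4bit computes Xi := Xi * H in GF(2^128) using the table built by
 * gcm_init_4bit. Each of the 32 nibbles of Xi selects an Htable entry, and
 * rem_4bit supplies the reduction term for the four bits shifted out at each
 * step. */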
static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {
  u128 Z;
  int cnt = 15;
  size_t rem, nlo, nhi;
  const union {
    long one;
    char little;
  } is_endian = {1};

  nlo = ((const uint8_t *)Xi)[15];
  nhi = nlo >> 4;
  nlo &= 0xf;

  Z.hi = Htable[nlo].hi;
  Z.lo = Htable[nlo].lo;

  while (1) {
    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nhi].hi;
    Z.lo ^= Htable[nhi].lo;

    if (--cnt < 0) {
      break;
    }

    nlo = ((const uint8_t *)Xi)[cnt];
    nhi = nlo >> 4;
    nlo &= 0xf;

    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nlo].hi;
    Z.lo ^= Htable[nlo].lo;
  }

  if (is_endian.little) {
#ifdef BSWAP8
    Xi[0] = BSWAP8(Z.hi);
    Xi[1] = BSWAP8(Z.lo);
#else
    uint8_t *p = (uint8_t *)Xi;
    uint32_t v;
    v = (uint32_t)(Z.hi >> 32);
    PUTU32(p, v);
    v = (uint32_t)(Z.hi);
    PUTU32(p + 4, v);
    v = (uint32_t)(Z.lo >> 32);
    PUTU32(p + 8, v);
    v = (uint32_t)(Z.lo);
    PUTU32(p + 12, v);
#endif
  } else {
    Xi[0] = Z.hi;
    Xi[1] = Z.lo;
  }
}

/* Streamed version of gcm_gmult_4bit; see CRYPTO_gcm128_[en|de]crypt for
 * details. Compiler-generated code doesn't seem to give any performance
 * improvement, at least not on x86[_64]. It's here mostly as a reference and
 * a placeholder for possible future non-trivial optimizations. */
static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
                           const uint8_t *inp, size_t len) {
  u128 Z;
  int cnt;
  size_t rem, nlo, nhi;
  const union {
    long one;
    char little;
  } is_endian = {1};

  do {
    cnt = 15;
    nlo = ((const uint8_t *)Xi)[15];
    nlo ^= inp[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nhi].hi;
      Z.lo ^= Htable[nhi].lo;

      if (--cnt < 0) {
        break;
      }

      nlo = ((const uint8_t *)Xi)[cnt];
      nlo ^= inp[cnt];
      nhi = nlo >> 4;
      nlo &= 0xf;

      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nlo].hi;
      Z.lo ^= Htable[nlo].lo;
    }

    if (is_endian.little) {
#ifdef BSWAP8
      Xi[0] = BSWAP8(Z.hi);
      Xi[1] = BSWAP8(Z.lo);
#else
      uint8_t *p = (uint8_t *)Xi;
      uint32_t v;
      v = (uint32_t)(Z.hi >> 32);
      PUTU32(p, v);
      v = (uint32_t)(Z.hi);
      PUTU32(p + 4, v);
      v = (uint32_t)(Z.lo >> 32);
      PUTU32(p + 8, v);
      v = (uint32_t)(Z.lo);
      PUTU32(p + 12, v);
#endif
    } else {
      Xi[0] = Z.hi;
      Xi[1] = Z.lo;
    }
  } while (inp += 16, len -= 16);
}
#else /* GHASH_ASM */
void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);
#endif

#define GCM_MUL(ctx, Xi) gcm_gmult_4bit(ctx->Xi.u, ctx->Htable)
#if defined(GHASH_ASM)
#define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len)
/* GHASH_CHUNK is a "stride parameter" intended to mitigate cache-thrashing
 * effects. The idea is to hash the data while it is still in the L1 cache
 * after the encryption pass. */
#define GHASH_CHUNK (3 * 1024)
#endif


#if defined(GHASH_ASM)
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
#define GHASH_ASM_X86_OR_64
#define GCM_FUNCREF_4BIT
void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                     size_t len);

#if defined(OPENSSL_X86)
#define gcm_init_avx gcm_init_clmul
#define gcm_gmult_avx gcm_gmult_clmul
#define gcm_ghash_avx gcm_ghash_clmul
#else
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                   size_t len);
#endif

#if defined(OPENSSL_X86)
#define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16],
                        const uint8_t *inp, size_t len);

void gcm_gmult_4bit_x86(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_x86(uint64_t Xi[2], const u128 Htable[16],
                        const uint8_t *inp, size_t len);
#endif
#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#include "../arm_arch.h"
#if __ARM_ARCH__ >= 7
#define GHASH_ASM_ARM
#define GCM_FUNCREF_4BIT

static int pmull_capable(void) {
  return (OPENSSL_armcap_P & ARMV8_PMULL) != 0;
}

void gcm_init_v8(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                  size_t len);

#if defined(OPENSSL_ARM)
/* 32-bit ARM also has support for doing GCM with NEON instructions. */
static int neon_capable(void) {
  return CRYPTO_is_NEON_capable();
}

void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);
#else
/* AArch64 only has the ARMv8 versions of functions. */
static int neon_capable(void) {
  return 0;
}
void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
  abort();
}
void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
  abort();
}
void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len) {
  abort();
}
#endif

#endif
#endif
#endif

#ifdef GCM_FUNCREF_4BIT
#undef GCM_MUL
#define GCM_MUL(ctx, Xi) (*gcm_gmult_p)(ctx->Xi.u, ctx->Htable)
#ifdef GHASH
#undef GHASH
#define GHASH(ctx, in, len) (*gcm_ghash_p)(ctx->Xi.u, ctx->Htable, in, len)
#endif
#endif

GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block) {
  GCM128_CONTEXT *ret;

  ret = (GCM128_CONTEXT *)OPENSSL_malloc(sizeof(GCM128_CONTEXT));
  if (ret != NULL) {
    CRYPTO_gcm128_init(ret, key, block);
  }

  return ret;
}

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block) {
  const union {
    long one;
    char little;
  } is_endian = {1};

  memset(ctx, 0, sizeof(*ctx));
  ctx->block = block;
  ctx->key = key;

  (*block)(ctx->H.c, ctx->H.c, key);

  if (is_endian.little) {
    /* H is stored in host byte order */
#ifdef BSWAP8
    ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
    ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
    uint8_t *p = ctx->H.c;
    uint64_t hi, lo;
    hi = (uint64_t)GETU32(p) << 32 | GETU32(p + 4);
    lo = (uint64_t)GETU32(p + 8) << 32 | GETU32(p + 12);
    ctx->H.u[0] = hi;
    ctx->H.u[1] = lo;
#endif
  }

#if defined(GHASH_ASM_X86_OR_64)
  if (crypto_gcm_clmul_enabled()) {
    if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
      gcm_init_avx(ctx->Htable, ctx->H.u);
      ctx->gmult = gcm_gmult_avx;
      ctx->ghash = gcm_ghash_avx;
    } else {
      gcm_init_clmul(ctx->Htable, ctx->H.u);
      ctx->gmult = gcm_gmult_clmul;
      ctx->ghash = gcm_ghash_clmul;
    }
    return;
  }
  gcm_init_4bit(ctx->Htable, ctx->H.u);
#if defined(GHASH_ASM_X86) /* x86 only */
  if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
    ctx->gmult = gcm_gmult_4bit_mmx;
    ctx->ghash = gcm_ghash_4bit_mmx;
  } else {
    ctx->gmult = gcm_gmult_4bit_x86;
    ctx->ghash = gcm_ghash_4bit_x86;
  }
#else
  ctx->gmult = gcm_gmult_4bit;
  ctx->ghash = gcm_ghash_4bit;
#endif
#elif defined(GHASH_ASM_ARM)
  if (pmull_capable()) {
    gcm_init_v8(ctx->Htable, ctx->H.u);
    ctx->gmult = gcm_gmult_v8;
    ctx->ghash = gcm_ghash_v8;
  } else if (neon_capable()) {
    gcm_init_neon(ctx->Htable, ctx->H.u);
    ctx->gmult = gcm_gmult_neon;
    ctx->ghash = gcm_ghash_neon;
  } else {
    gcm_init_4bit(ctx->Htable, ctx->H.u);
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
  }
#else
  gcm_init_4bit(ctx->Htable, ctx->H.u);
  ctx->gmult = gcm_gmult_4bit;
  ctx->ghash = gcm_ghash_4bit;
#endif
}

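/* CRYPTO_gcm128_setiv resets the context for a new message and derives the
 * initial counter block Y0 from the IV: a 96-bit IV is used directly as
 * IV || 0^31 || 1, while any other length is hashed with GHASH together with
 * its bit length, per NIST SP 800-38D. It also precomputes EK0 = E(K, Y0),
 * which CRYPTO_gcm128_finish XORs into the tag. */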
void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const uint8_t *iv, size_t len) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif

  ctx->Yi.u[0] = 0;
  ctx->Yi.u[1] = 0;
  ctx->Xi.u[0] = 0;
  ctx->Xi.u[1] = 0;
  ctx->len.u[0] = 0; /* AAD length */
  ctx->len.u[1] = 0; /* message length */
  ctx->ares = 0;
  ctx->mres = 0;

  if (len == 12) {
    memcpy(ctx->Yi.c, iv, 12);
    ctx->Yi.c[15] = 1;
    ctr = 1;
  } else {
    size_t i;
    uint64_t len0 = len;

    while (len >= 16) {
      for (i = 0; i < 16; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
      iv += 16;
      len -= 16;
    }
    if (len) {
      for (i = 0; i < len; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
    }
    len0 <<= 3;
    if (is_endian.little) {
#ifdef BSWAP8
      ctx->Yi.u[1] ^= BSWAP8(len0);
#else
      ctx->Yi.c[8] ^= (uint8_t)(len0 >> 56);
      ctx->Yi.c[9] ^= (uint8_t)(len0 >> 48);
      ctx->Yi.c[10] ^= (uint8_t)(len0 >> 40);
      ctx->Yi.c[11] ^= (uint8_t)(len0 >> 32);
      ctx->Yi.c[12] ^= (uint8_t)(len0 >> 24);
      ctx->Yi.c[13] ^= (uint8_t)(len0 >> 16);
      ctx->Yi.c[14] ^= (uint8_t)(len0 >> 8);
      ctx->Yi.c[15] ^= (uint8_t)(len0);
#endif
    } else {
      ctx->Yi.u[1] ^= len0;
    }

    GCM_MUL(ctx, Yi);

    if (is_endian.little) {
      ctr = GETU32(ctx->Yi.c + 12);
    } else {
      ctr = ctx->Yi.d[3];
    }
  }

  (*ctx->block)(ctx->Yi.c, ctx->EK0.c, ctx->key);
  ++ctr;
  if (is_endian.little) {
    PUTU32(ctx->Yi.c + 12, ctr);
  } else {
    ctx->Yi.d[3] = ctr;
  }
}

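/* CRYPTO_gcm128_aad absorbs additional authenticated data into the GHASH
 * state. It must be called before any encrypt/decrypt call for the same
 * message (it fails once ctx->len.u[1] is non-zero) and enforces the
 * 2^61-byte AAD limit. */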
int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) {
  size_t i;
  unsigned int n;
  uint64_t alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  if (ctx->len.u[1]) {
    return 0;
  }

  alen += len;
  if (alen > (OPENSSL_U64(1) << 61) || (sizeof(len) == 8 && alen < len)) {
    return 0;
  }
  ctx->len.u[0] = alen;

  n = ctx->ares;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(aad++);
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

#ifdef GHASH
  if ((i = (len & (size_t) - 16))) {
    GHASH(ctx, aad, i);
    aad += i;
    len -= i;
  }
#else
  while (len >= 16) {
    for (i = 0; i < 16; ++i) {
      ctx->Xi.c[i] ^= aad[i];
    }
    GCM_MUL(ctx, Xi);
    aad += 16;
    len -= 16;
  }
#endif
  if (len) {
    n = (unsigned int)len;
    for (i = 0; i < len; ++i) {
      ctx->Xi.c[i] ^= aad[i];
    }
  }

  ctx->ares = n;
  return 1;
}

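/* CRYPTO_gcm128_encrypt encrypts |len| bytes in counter mode, invoking the
 * block cipher directly for each counter block, and folds the resulting
 * ciphertext into the GHASH state. The total message length is capped at
 * GCM's limit of 2^36 - 32 bytes. */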
int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const unsigned char *in,
                          unsigned char *out, size_t len) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  unsigned int n, ctr;
  size_t i;
  uint64_t mlen = ctx->len.u[1];
  block128_f block = ctx->block;
  void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((OPENSSL_U64(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    /* First call to encrypt finalizes GHASH(AAD) */
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  if (is_endian.little) {
    ctr = GETU32(ctx->Yi.c + 12);
  } else {
    ctr = ctx->Yi.d[3];
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) {
    for (i = 0; i < len; ++i) {
      if (n == 0) {
        (*block)(ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (is_endian.little) {
          PUTU32(ctx->Yi.c + 12, ctr);
        } else {
          ctx->Yi.d[3] = ctr;
        }
      }
      ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
      n = (n + 1) % 16;
      if (n == 0) {
        GCM_MUL(ctx, Xi);
      }
    }

    ctx->mres = n;
    return 1;
  }
#if defined(GHASH) && defined(GHASH_CHUNK)
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;

    while (j) {
      size_t *out_t = (size_t *)out;
      const size_t *in_t = (const size_t *)in;

      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      if (is_endian.little) {
        PUTU32(ctx->Yi.c + 12, ctr);
      } else {
        ctx->Yi.d[3] = ctr;
      }
      for (i = 0; i < 16 / sizeof(size_t); ++i) {
        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
    len -= GHASH_CHUNK;
  }
  if ((i = (len & (size_t) - 16))) {
    size_t j = i;

    while (len >= 16) {
      size_t *out_t = (size_t *)out;
      const size_t *in_t = (const size_t *)in;

      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      if (is_endian.little) {
        PUTU32(ctx->Yi.c + 12, ctr);
      } else {
        ctx->Yi.d[3] = ctr;
      }
      for (i = 0; i < 16 / sizeof(size_t); ++i) {
        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
      }
      out += 16;
      in += 16;
      len -= 16;
    }
    GHASH(ctx, out - j, j);
  }
#else
  while (len >= 16) {
    size_t *out_t = (size_t *)out;
    const size_t *in_t = (const size_t *)in;

    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    for (i = 0; i < 16 / sizeof(size_t); ++i) {
      ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
    }
    GCM_MUL(ctx, Xi);
    out += 16;
    in += 16;
    len -= 16;
  }
#endif
  if (len) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    while (len--) {
      ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

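/* CRYPTO_gcm128_decrypt mirrors CRYPTO_gcm128_encrypt: the ciphertext is
 * folded into the GHASH state before (or as) it is decrypted, so the
 * authentication tag is still computed over the ciphertext. */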
int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const unsigned char *in,
                          unsigned char *out, size_t len) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  unsigned int n, ctr;
  size_t i;
  uint64_t mlen = ctx->len.u[1];
  block128_f block = ctx->block;
  void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((OPENSSL_U64(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    /* First call to decrypt finalizes GHASH(AAD) */
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  if (is_endian.little) {
    ctr = GETU32(ctx->Yi.c + 12);
  } else {
    ctr = ctx->Yi.d[3];
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) {
    for (i = 0; i < len; ++i) {
      uint8_t c;
      if (n == 0) {
        (*block)(ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (is_endian.little) {
          PUTU32(ctx->Yi.c + 12, ctr);
        } else {
          ctx->Yi.d[3] = ctr;
        }
      }
      c = in[i];
      out[i] = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      n = (n + 1) % 16;
      if (n == 0) {
        GCM_MUL(ctx, Xi);
      }
    }

    ctx->mres = n;
    return 1;
  }
#if defined(GHASH) && defined(GHASH_CHUNK)
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;

    GHASH(ctx, in, GHASH_CHUNK);
    while (j) {
      size_t *out_t = (size_t *)out;
      const size_t *in_t = (const size_t *)in;

      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      if (is_endian.little) {
        PUTU32(ctx->Yi.c + 12, ctr);
      } else {
        ctx->Yi.d[3] = ctr;
      }
      for (i = 0; i < 16 / sizeof(size_t); ++i) {
        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    len -= GHASH_CHUNK;
  }
  if ((i = (len & (size_t) - 16))) {
    GHASH(ctx, in, i);
    while (len >= 16) {
      size_t *out_t = (size_t *)out;
      const size_t *in_t = (const size_t *)in;

      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      if (is_endian.little) {
        PUTU32(ctx->Yi.c + 12, ctr);
      } else {
        ctx->Yi.d[3] = ctr;
      }
      for (i = 0; i < 16 / sizeof(size_t); ++i) {
        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
      }
      out += 16;
      in += 16;
      len -= 16;
    }
  }
#else
  while (len >= 16) {
    size_t *out_t = (size_t *)out;
    const size_t *in_t = (const size_t *)in;

    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    for (i = 0; i < 16 / sizeof(size_t); ++i) {
      size_t c = in_t[i];
      out_t[i] = c ^ ctx->EKi.t[i];
      ctx->Xi.t[i] ^= c;
    }
    GCM_MUL(ctx, Xi);
    out += 16;
    in += 16;
    len -= 16;
  }
#endif
  if (len) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi.c[n] ^= c;
      out[n] = c ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

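/* CRYPTO_gcm128_encrypt_ctr32 behaves like CRYPTO_gcm128_encrypt but pushes
 * the bulk of the work through |stream|, a ctr128_f that encrypts many
 * counter blocks per call (typically a hardware-accelerated CTR routine),
 * and then folds the produced ciphertext into the GHASH state in large
 * chunks. */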
int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const uint8_t *in,
                                uint8_t *out, size_t len, ctr128_f stream) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  unsigned int n, ctr;
  size_t i;
  uint64_t mlen = ctx->len.u[1];
  void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((OPENSSL_U64(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    /* First call to encrypt finalizes GHASH(AAD) */
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  if (is_endian.little) {
    ctr = GETU32(ctx->Yi.c + 12);
  } else {
    ctr = ctx->Yi.d[3];
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
#if defined(GHASH)
  while (len >= GHASH_CHUNK) {
    (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
    ctr += GHASH_CHUNK / 16;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    GHASH(ctx, out, GHASH_CHUNK);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }
#endif
  if ((i = (len & (size_t) - 16))) {
    size_t j = i / 16;

    (*stream)(in, out, j, key, ctx->Yi.c);
    ctr += (unsigned int)j;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    in += i;
    len -= i;
#if defined(GHASH)
    GHASH(ctx, out, i);
    out += i;
#else
    while (j--) {
      for (i = 0; i < 16; ++i) {
        ctx->Xi.c[i] ^= out[i];
      }
      GCM_MUL(ctx, Xi);
      out += 16;
    }
#endif
  }
  if (len) {
    (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    while (len--) {
      ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

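/* CRYPTO_gcm128_decrypt_ctr32 is the decryption counterpart of
 * CRYPTO_gcm128_encrypt_ctr32: it hashes the incoming ciphertext first and
 * then decrypts it via the |stream| function. */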
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const uint8_t *in,
                                uint8_t *out, size_t len,
                                ctr128_f stream) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  unsigned int n, ctr;
  size_t i;
  uint64_t mlen = ctx->len.u[1];
  void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((OPENSSL_U64(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    /* First call to decrypt finalizes GHASH(AAD) */
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  if (is_endian.little) {
    ctr = GETU32(ctx->Yi.c + 12);
  } else {
    ctr = ctx->Yi.d[3];
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
#if defined(GHASH)
  while (len >= GHASH_CHUNK) {
    GHASH(ctx, in, GHASH_CHUNK);
    (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
    ctr += GHASH_CHUNK / 16;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }
#endif
  if ((i = (len & (size_t) - 16))) {
    size_t j = i / 16;

#if defined(GHASH)
    GHASH(ctx, in, i);
#else
    while (j--) {
      size_t k;
      for (k = 0; k < 16; ++k) {
        ctx->Xi.c[k] ^= in[k];
      }
      GCM_MUL(ctx, Xi);
      in += 16;
    }
    j = i / 16;
    in -= i;
#endif
    (*stream)(in, out, j, key, ctx->Yi.c);
    ctr += (unsigned int)j;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    out += i;
    in += i;
    len -= i;
  }
  if (len) {
    (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    if (is_endian.little) {
      PUTU32(ctx->Yi.c + 12, ctr);
    } else {
      ctx->Yi.d[3] = ctr;
    }
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi.c[n] ^= c;
      out[n] = c ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

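/* CRYPTO_gcm128_finish completes GHASH by appending the 64-bit bit lengths
 * of the AAD and the message, XORs in EK0 to form the tag in ctx->Xi and,
 * when |tag| is non-NULL, compares it against the caller's tag in constant
 * time, returning one on match and zero otherwise. */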
int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len) {
  const union {
    long one;
    char little;
  } is_endian = {1};
  uint64_t alen = ctx->len.u[0] << 3;
  uint64_t clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif

  if (ctx->mres || ctx->ares) {
    GCM_MUL(ctx, Xi);
  }

  if (is_endian.little) {
#ifdef BSWAP8
    alen = BSWAP8(alen);
    clen = BSWAP8(clen);
#else
    uint8_t *p = ctx->len.c;

    ctx->len.u[0] = alen;
    ctx->len.u[1] = clen;

    alen = (uint64_t)GETU32(p) << 32 | GETU32(p + 4);
    clen = (uint64_t)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
  }

  ctx->Xi.u[0] ^= alen;
  ctx->Xi.u[1] ^= clen;
  GCM_MUL(ctx, Xi);

  ctx->Xi.u[0] ^= ctx->EK0.u[0];
  ctx->Xi.u[1] ^= ctx->EK0.u[1];

  if (tag && len <= sizeof(ctx->Xi)) {
    return CRYPTO_memcmp(ctx->Xi.c, tag, len) == 0;
  } else {
    return 0;
  }
}

void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) {
  CRYPTO_gcm128_finish(ctx, NULL, 0);
  memcpy(tag, ctx->Xi.c, len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}
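
/* A minimal usage sketch of this API, kept in a comment so it is not
 * compiled into the library. It assumes an AES-128 key schedule from
 * <openssl/aes.h>; the wrapper function and all names below are
 * illustrative, and error handling is omitted.
 *
 *   static void aes_block(const uint8_t in[16], uint8_t out[16],
 *                         const void *key) {
 *     AES_encrypt(in, out, (const AES_KEY *)key);
 *   }
 *
 *   AES_KEY ks;
 *   uint8_t tag[16];
 *   AES_set_encrypt_key(key, 128, &ks);
 *   GCM128_CONTEXT *gcm = CRYPTO_gcm128_new(&ks, aes_block);
 *   CRYPTO_gcm128_setiv(gcm, iv, iv_len);
 *   if (CRYPTO_gcm128_aad(gcm, aad, aad_len) &&
 *       CRYPTO_gcm128_encrypt(gcm, plaintext, ciphertext, plaintext_len)) {
 *     CRYPTO_gcm128_tag(gcm, tag, sizeof(tag));
 *   }
 *   CRYPTO_gcm128_release(gcm);
 */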

void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx) {
  if (ctx) {
    OPENSSL_cleanse(ctx, sizeof(*ctx));
    OPENSSL_free(ctx);
  }
}

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#ifdef GHASH_ASM
  return OPENSSL_ia32cap_P[0] & (1 << 24) && /* check FXSR bit */
         OPENSSL_ia32cap_P[1] & (1 << 1);    /* check PCLMULQDQ bit */
#else
  return 0;
#endif
}
#endif