// Copyright 2012 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This is an implementation of the P224 elliptic curve group. It's written to
// be short and simple rather than fast, although it's still constant-time.
//
// See http://www.imperialviolet.org/2010/12/04/ecc.html ([1]) for background.

#include "third_party/chromium/crypto/p224.h"

#include <string.h>

namespace {

inline uint32_t ByteSwap(uint32_t x) {
  return ((x & 0x000000fful) << 24) | ((x & 0x0000ff00ul) << 8) |
         ((x & 0x00ff0000ul) >> 8) | ((x & 0xff000000ul) >> 24);
}

inline uint32_t HostToNet32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}

inline uint32_t NetToHost32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}

// Field element functions.
//
// The field that we're dealing with is ℤ/pℤ where p = 2**224 - 2**96 + 1.
//
// Field elements are represented by a FieldElement, which is a typedef to an
// array of 8 uint32_t's. The value of a FieldElement, a, is:
//   a[0] + 2**28·a[1] + 2**56·a[2] + ... + 2**196·a[7]
//
// Using 28-bit limbs means that there's only 4 bits of headroom, which is less
// than we would really like. But it has the useful feature that we hit 2**224
// exactly, making the reflections during a reduce much nicer.
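//
// As a concrete illustration (editorial example, not from the original
// sources): the value 1 + 3·2**28 + 2**196 would be stored as
//
//   const FieldElement kExample = {1, 3, 0, 0, 0, 0, 0, 1};
//
// with limb i carrying the coefficient of 2**(28·i).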

using crypto::p224::FieldElement;

// kP is the P224 prime.
const FieldElement kP = {
  1, 0, 0, 268431360,
  268435455, 268435455, 268435455, 268435455,
};

void Contract(FieldElement* inout);

// IsZero returns 0xffffffff if a == 0 mod p and 0 otherwise.
uint32_t IsZero(const FieldElement& a) {
  FieldElement minimal;
  memcpy(&minimal, &a, sizeof(minimal));
  Contract(&minimal);

  uint32_t is_zero = 0, is_p = 0;
  for (unsigned i = 0; i < 8; i++) {
    is_zero |= minimal[i];
    is_p |= minimal[i] - kP[i];
  }

  // If either is_zero or is_p is 0 then a is zero mod p and we should return
  // the all-ones mask.
  is_zero |= is_zero >> 16;
  is_zero |= is_zero >> 8;
  is_zero |= is_zero >> 4;
  is_zero |= is_zero >> 2;
  is_zero |= is_zero >> 1;

  is_p |= is_p >> 16;
  is_p |= is_p >> 8;
  is_p |= is_p >> 4;
  is_p |= is_p >> 2;
  is_p |= is_p >> 1;

  // For is_zero and is_p, the LSB is 0 iff all the bits are zero.
  is_zero &= is_p & 1;
  is_zero = (~is_zero) << 31;
  is_zero = static_cast<int32_t>(is_zero) >> 31;
  return is_zero;
}

// Add computes *out = a+b
//
// a[i] + b[i] < 2**32
void Add(FieldElement* out, const FieldElement& a, const FieldElement& b) {
  for (int i = 0; i < 8; i++) {
    (*out)[i] = a[i] + b[i];
  }
}

static const uint32_t kTwo31p3 = (1u << 31) + (1u << 3);
static const uint32_t kTwo31m3 = (1u << 31) - (1u << 3);
static const uint32_t kTwo31m15m3 = (1u << 31) - (1u << 15) - (1u << 3);
// kZero31ModP is 0 mod p where bit 31 is set in all limbs so that we can
// subtract smaller amounts without underflow. See the section "Subtraction" in
// [1] for why.
static const FieldElement kZero31ModP = {
  kTwo31p3, kTwo31m3, kTwo31m3, kTwo31m15m3,
  kTwo31m3, kTwo31m3, kTwo31m3, kTwo31m3
};

// Subtract computes *out = a-b
//
// a[i], b[i] < 2**30
// out[i] < 2**32
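//
// Editorial note: with b[i] < 2**30 and every limb of kZero31ModP at least
// 2**31 - 2**15 - 2**3, each per-limb value a[i] + kZero31ModP[i] - b[i] stays
// strictly positive; and with a[i] < 2**30 and kZero31ModP[i] <= 2**31 + 2**3
// it also stays below 2**32, so the uint32_t arithmetic below never wraps and
// the stated out[i] < 2**32 bound holds.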
void Subtract(FieldElement* out, const FieldElement& a, const FieldElement& b) {
  for (int i = 0; i < 8; i++) {
    // See the section on "Subtraction" in [1] for details.
    (*out)[i] = a[i] + kZero31ModP[i] - b[i];
  }
}

static const uint64_t kTwo63p35 = (1ull << 63) + (1ull << 35);
static const uint64_t kTwo63m35 = (1ull << 63) - (1ull << 35);
static const uint64_t kTwo63m35m19 = (1ull << 63) - (1ull << 35) - (1ull << 19);
// kZero63ModP is 0 mod p where bit 63 is set in all limbs. See the section
// "Subtraction" in [1] for why.
static const uint64_t kZero63ModP[8] = {
  kTwo63p35, kTwo63m35, kTwo63m35, kTwo63m35,
  kTwo63m35m19, kTwo63m35, kTwo63m35, kTwo63m35,
};

static const uint32_t kBottom28Bits = 0xfffffff;

// LargeFieldElement also represents an element of the field. The limbs are
// still spaced 28-bits apart and in little-endian order. So the limbs are at
// 0, 28, 56, ..., 392 bits, each 64-bits wide.
typedef uint64_t LargeFieldElement[15];

// ReduceLarge converts a LargeFieldElement to a FieldElement.
//
// in[i] < 2**62
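//
// Editorial note on the "reflection" steps below: since p = 2**224 - 2**96 + 1
// we have 2**224 ≡ 2**96 - 1 (mod p). A coefficient c at bit position
// 224 + 28·k can therefore be folded down by subtracting c at limb k (the "+1"
// reflection) and adding c·2**96 at position 96 + 28·k (the "-2**96"
// reflection). Because 96 = 3·28 + 12, the latter lands partly in limb k+3
// (shifted up by 12 bits) and partly in limb k+4, which is what the
// (in[i] & 0xffff) << 12 and in[i] >> 16 terms implement.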

// GCC 4.9 incorrectly vectorizes the first coefficient elimination loop, so
// disable that optimization via pragma. Don't use the pragma under Clang, since
// clang doesn't understand it.
// TODO(wez): Remove this when crbug.com/439566 is fixed.
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC optimize("no-tree-vectorize")
#endif

void ReduceLarge(FieldElement* out, LargeFieldElement* inptr) {
  LargeFieldElement& in(*inptr);

  for (int i = 0; i < 8; i++) {
    in[i] += kZero63ModP[i];
  }

  // Eliminate the coefficients at 2**224 and greater while maintaining the
  // same value mod p.
  for (int i = 14; i >= 8; i--) {
    in[i-8] -= in[i];  // reflection off the "+1" term of p.
    in[i-5] += (in[i] & 0xffff) << 12;  // part of the "-2**96" reflection.
    in[i-4] += in[i] >> 16;  // the rest of the "-2**96" reflection.
  }
  in[8] = 0;
  // in[0..8] < 2**64

  // As the values become small enough, we start to store them in |out| and use
  // 32-bit operations.
  for (int i = 1; i < 8; i++) {
    in[i+1] += in[i] >> 28;
    (*out)[i] = static_cast<uint32_t>(in[i] & kBottom28Bits);
  }
  // Eliminate the term at 2**224 that we introduced while keeping the same
  // value mod p.
  in[0] -= in[8];  // reflection off the "+1" term of p.
  (*out)[3] += static_cast<uint32_t>(in[8] & 0xffff) << 12;  // "-2**96" term
  (*out)[4] += static_cast<uint32_t>(in[8] >> 16);  // rest of "-2**96" term
  // in[0] < 2**64
  // out[3] < 2**29
  // out[4] < 2**29
  // out[1,2,5..7] < 2**28

  (*out)[0] = static_cast<uint32_t>(in[0] & kBottom28Bits);
  (*out)[1] += static_cast<uint32_t>((in[0] >> 28) & kBottom28Bits);
  (*out)[2] += static_cast<uint32_t>(in[0] >> 56);
  // out[0] < 2**28
  // out[1..4] < 2**29
  // out[5..7] < 2**28
}

// TODO(wez): Remove this when crbug.com/439566 is fixed.
#if defined(__GNUC__) && !defined(__clang__)
// Reenable "tree-vectorize" optimization if it got disabled for ReduceLarge.
#pragma GCC reset_options
#endif

// Mul computes *out = a*b
//
// a[i] < 2**29, b[i] < 2**30 (or vice versa)
// out[i] < 2**29
void Mul(FieldElement* out, const FieldElement& a, const FieldElement& b) {
  LargeFieldElement tmp;
  memset(&tmp, 0, sizeof(tmp));

  for (int i = 0; i < 8; i++) {
    for (int j = 0; j < 8; j++) {
      tmp[i + j] += static_cast<uint64_t>(a[i]) * static_cast<uint64_t>(b[j]);
    }
  }

  ReduceLarge(out, &tmp);
}

// Square computes *out = a*a
//
// a[i] < 2**29
// out[i] < 2**29
void Square(FieldElement* out, const FieldElement& a) {
  LargeFieldElement tmp;
  memset(&tmp, 0, sizeof(tmp));

  for (int i = 0; i < 8; i++) {
    for (int j = 0; j <= i; j++) {
      uint64_t r = static_cast<uint64_t>(a[i]) * static_cast<uint64_t>(a[j]);
      if (i == j) {
        tmp[i+j] += r;
      } else {
        tmp[i+j] += r << 1;
      }
    }
  }

  ReduceLarge(out, &tmp);
}

// Reduce reduces the coefficients of in_out to smaller bounds.
//
// On entry: a[i] < 2**31 + 2**30
// On exit: a[i] < 2**29
void Reduce(FieldElement* in_out) {
  FieldElement& a = *in_out;

  for (int i = 0; i < 7; i++) {
    a[i+1] += a[i] >> 28;
    a[i] &= kBottom28Bits;
  }
  uint32_t top = a[7] >> 28;
  a[7] &= kBottom28Bits;

  // top < 2**4
  // Constant-time: mask = (top != 0) ? 0xffffffff : 0
  uint32_t mask = top;
  mask |= mask >> 2;
  mask |= mask >> 1;
  mask <<= 31;
  mask = static_cast<uint32_t>(static_cast<int32_t>(mask) >> 31);

  // Eliminate top while maintaining the same value mod p.
  a[0] -= top;
  a[3] += top << 12;

  // We may have just made a[0] negative but, if we did, then we must
  // have added something to a[3], thus it's > 2**12. Therefore we can
  // carry down to a[0].
  a[3] -= 1 & mask;
  a[2] += mask & ((1<<28) - 1);
  a[1] += mask & ((1<<28) - 1);
  a[0] += mask & (1<<28);
}

// Invert calculates *out = in**-1 by computing in**(2**224 - 2**96 - 1), i.e.
// Fermat's little theorem.
void Invert(FieldElement* out, const FieldElement& in) {
  FieldElement f1, f2, f3, f4;

  Square(&f1, in);  // 2
  Mul(&f1, f1, in);  // 2**2 - 1
  Square(&f1, f1);  // 2**3 - 2
  Mul(&f1, f1, in);  // 2**3 - 1
  Square(&f2, f1);  // 2**4 - 2
  Square(&f2, f2);  // 2**5 - 4
  Square(&f2, f2);  // 2**6 - 8
  Mul(&f1, f1, f2);  // 2**6 - 1
  Square(&f2, f1);  // 2**7 - 2
  for (int i = 0; i < 5; i++) {  // 2**12 - 2**6
    Square(&f2, f2);
  }
  Mul(&f2, f2, f1);  // 2**12 - 1
  Square(&f3, f2);  // 2**13 - 2
  for (int i = 0; i < 11; i++) {  // 2**24 - 2**12
    Square(&f3, f3);
  }
  Mul(&f2, f3, f2);  // 2**24 - 1
  Square(&f3, f2);  // 2**25 - 2
  for (int i = 0; i < 23; i++) {  // 2**48 - 2**24
    Square(&f3, f3);
  }
  Mul(&f3, f3, f2);  // 2**48 - 1
  Square(&f4, f3);  // 2**49 - 2
  for (int i = 0; i < 47; i++) {  // 2**96 - 2**48
    Square(&f4, f4);
  }
  Mul(&f3, f3, f4);  // 2**96 - 1
  Square(&f4, f3);  // 2**97 - 2
  for (int i = 0; i < 23; i++) {  // 2**120 - 2**24
    Square(&f4, f4);
  }
  Mul(&f2, f4, f2);  // 2**120 - 1
  for (int i = 0; i < 6; i++) {  // 2**126 - 2**6
    Square(&f2, f2);
  }
  Mul(&f1, f1, f2);  // 2**126 - 1
  Square(&f1, f1);  // 2**127 - 2
  Mul(&f1, f1, in);  // 2**127 - 1
  for (int i = 0; i < 97; i++) {  // 2**224 - 2**97
    Square(&f1, f1);
  }
  Mul(out, f1, f3);  // 2**224 - 2**96 - 1
}

// Contract converts a FieldElement to its minimal, distinguished form.
//
// On entry, in[i] < 2**29
// On exit, in[i] < 2**28
void Contract(FieldElement* inout) {
  FieldElement& out = *inout;

  // Reduce the coefficients to < 2**28.
  for (int i = 0; i < 7; i++) {
    out[i+1] += out[i] >> 28;
    out[i] &= kBottom28Bits;
  }
  uint32_t top = out[7] >> 28;
  out[7] &= kBottom28Bits;

  // Eliminate top while maintaining the same value mod p.
  out[0] -= top;
  out[3] += top << 12;

  // We may just have made out[0] negative. So we carry down. If we made
  // out[0] negative then we know that out[3] is sufficiently positive
  // because we just added to it.
  for (int i = 0; i < 3; i++) {
    uint32_t mask = static_cast<uint32_t>(static_cast<int32_t>(out[i]) >> 31);
    out[i] += (1 << 28) & mask;
    out[i+1] -= 1 & mask;
  }

  // We might have pushed out[3] over 2**28 so we perform another, partial
  // carry chain.
  for (int i = 3; i < 7; i++) {
    out[i+1] += out[i] >> 28;
    out[i] &= kBottom28Bits;
  }
  top = out[7] >> 28;
  out[7] &= kBottom28Bits;

  // Eliminate top while maintaining the same value mod p.
  out[0] -= top;
  out[3] += top << 12;

  // There are two cases to consider for out[3]:
  // 1) The first time that we eliminated top, we didn't push out[3] over
  //    2**28. In this case, the partial carry chain didn't change any values
  //    and top is zero.
  // 2) We did push out[3] over 2**28 the first time that we eliminated top.
  //    The first value of top was in [0..16), therefore, prior to eliminating
  //    the first top, 0xfff1000 <= out[3] <= 0xfffffff. Therefore, after
  //    overflowing and being reduced by the second carry chain, out[3] <=
  //    0xf000. Thus it cannot have overflowed when we eliminated top for the
  //    second time.

  // Again, we may just have made out[0] negative, so do the same carry down.
  // As before, if we made out[0] negative then we know that out[3] is
  // sufficiently positive.
  for (int i = 0; i < 3; i++) {
    uint32_t mask = static_cast<uint32_t>(static_cast<int32_t>(out[i]) >> 31);
    out[i] += (1 << 28) & mask;
    out[i+1] -= 1 & mask;
  }

  // The value is < 2**224, but maybe greater than p. In order to reduce to a
  // unique, minimal value we see if the value is >= p and, if so, subtract p.

  // First we build a mask from the top four limbs, which must all be
  // equal to bottom28Bits if the whole value is >= p. If top_4_all_ones
  // ends up with any zero bits in the bottom 28 bits, then this wasn't
  // true.
  uint32_t top_4_all_ones = 0xffffffffu;
  for (int i = 4; i < 8; i++) {
    top_4_all_ones &= out[i];
  }
  top_4_all_ones |= 0xf0000000;
  // Now we replicate any zero bits to all the bits in top_4_all_ones.
  top_4_all_ones &= top_4_all_ones >> 16;
  top_4_all_ones &= top_4_all_ones >> 8;
  top_4_all_ones &= top_4_all_ones >> 4;
  top_4_all_ones &= top_4_all_ones >> 2;
  top_4_all_ones &= top_4_all_ones >> 1;
  top_4_all_ones =
      static_cast<uint32_t>(static_cast<int32_t>(top_4_all_ones << 31) >> 31);

  // Now we test whether the bottom three limbs are non-zero.
  uint32_t bottom_3_non_zero = out[0] | out[1] | out[2];
  bottom_3_non_zero |= bottom_3_non_zero >> 16;
  bottom_3_non_zero |= bottom_3_non_zero >> 8;
  bottom_3_non_zero |= bottom_3_non_zero >> 4;
  bottom_3_non_zero |= bottom_3_non_zero >> 2;
  bottom_3_non_zero |= bottom_3_non_zero >> 1;
  bottom_3_non_zero = static_cast<uint32_t>(
      static_cast<int32_t>(bottom_3_non_zero << 31) >> 31);

  // Everything depends on the value of out[3].
  //    If it's > 0xffff000 and top_4_all_ones != 0 then the whole value is >= p
  //    If it's = 0xffff000 and top_4_all_ones != 0 and bottom_3_non_zero != 0,
  //      then the whole value is >= p
  //    If it's < 0xffff000, then the whole value is < p
  uint32_t n = out[3] - 0xffff000;
  uint32_t out_3_equal = n;
  out_3_equal |= out_3_equal >> 16;
  out_3_equal |= out_3_equal >> 8;
  out_3_equal |= out_3_equal >> 4;
  out_3_equal |= out_3_equal >> 2;
  out_3_equal |= out_3_equal >> 1;
  out_3_equal =
      ~static_cast<uint32_t>(static_cast<int32_t>(out_3_equal << 31) >> 31);

  // If out[3] > 0xffff000 then n's MSB will be zero.
  uint32_t out_3_gt =
      ~static_cast<uint32_t>(static_cast<int32_t>(n) >> 31);

  uint32_t mask =
      top_4_all_ones & ((out_3_equal & bottom_3_non_zero) | out_3_gt);
  out[0] -= 1 & mask;
  out[3] -= 0xffff000 & mask;
  out[4] -= 0xfffffff & mask;
  out[5] -= 0xfffffff & mask;
  out[6] -= 0xfffffff & mask;
  out[7] -= 0xfffffff & mask;
}


// Group element functions.
//
// These functions deal with group elements. The group is an elliptic curve
// group with a = -3 defined in FIPS 186-3, section D.2.2.

using crypto::p224::Point;

// kB is the b parameter of the elliptic curve.
const FieldElement kB = {
  55967668, 11768882, 265861671, 185302395,
  39211076, 180311059, 84673715, 188764328,
};

void CopyConditional(Point* out, const Point& a, uint32_t mask);
void DoubleJacobian(Point* out, const Point& a);

// AddJacobian computes *out = a+b where a != b.
void AddJacobian(Point *out,
                 const Point& a,
                 const Point& b) {
  // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
  FieldElement z1z1, z2z2, u1, u2, s1, s2, h, i, j, r, v;

  uint32_t z1_is_zero = IsZero(a.z);
  uint32_t z2_is_zero = IsZero(b.z);

  // Z1Z1 = Z1²
  Square(&z1z1, a.z);

  // Z2Z2 = Z2²
  Square(&z2z2, b.z);

  // U1 = X1*Z2Z2
  Mul(&u1, a.x, z2z2);

  // U2 = X2*Z1Z1
  Mul(&u2, b.x, z1z1);

  // S1 = Y1*Z2*Z2Z2
  Mul(&s1, b.z, z2z2);
  Mul(&s1, a.y, s1);

  // S2 = Y2*Z1*Z1Z1
  Mul(&s2, a.z, z1z1);
  Mul(&s2, b.y, s2);

  // H = U2-U1
  Subtract(&h, u2, u1);
  Reduce(&h);
  uint32_t x_equal = IsZero(h);

  // I = (2*H)²
  for (int k = 0; k < 8; k++) {
    i[k] = h[k] << 1;
  }
  Reduce(&i);
  Square(&i, i);

  // J = H*I
  Mul(&j, h, i);
  // r = 2*(S2-S1)
  Subtract(&r, s2, s1);
  Reduce(&r);
  uint32_t y_equal = IsZero(r);

  if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
    // The two input points are the same therefore we must use the dedicated
    // doubling function as the slope of the line is undefined.
    DoubleJacobian(out, a);
    return;
  }

  for (int k = 0; k < 8; k++) {
    r[k] <<= 1;
  }
  Reduce(&r);

  // V = U1*I
  Mul(&v, u1, i);

  // Z3 = ((Z1+Z2)²-Z1Z1-Z2Z2)*H
  Add(&z1z1, z1z1, z2z2);
  Add(&z2z2, a.z, b.z);
  Reduce(&z2z2);
  Square(&z2z2, z2z2);
  Subtract(&out->z, z2z2, z1z1);
  Reduce(&out->z);
  Mul(&out->z, out->z, h);

  // X3 = r²-J-2*V
  for (int k = 0; k < 8; k++) {
    z1z1[k] = v[k] << 1;
  }
  Add(&z1z1, j, z1z1);
  Reduce(&z1z1);
  Square(&out->x, r);
  Subtract(&out->x, out->x, z1z1);
  Reduce(&out->x);

  // Y3 = r*(V-X3)-2*S1*J
  for (int k = 0; k < 8; k++) {
    s1[k] <<= 1;
  }
  Mul(&s1, s1, j);
  Subtract(&z1z1, v, out->x);
  Reduce(&z1z1);
  Mul(&z1z1, z1z1, r);
  Subtract(&out->y, z1z1, s1);
  Reduce(&out->y);

  CopyConditional(out, a, z2_is_zero);
  CopyConditional(out, b, z1_is_zero);
}

// DoubleJacobian computes *out = a+a.
void DoubleJacobian(Point* out, const Point& a) {
  // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  FieldElement delta, gamma, beta, alpha, t;

  Square(&delta, a.z);
  Square(&gamma, a.y);
  Mul(&beta, a.x, gamma);

  // alpha = 3*(X1-delta)*(X1+delta)
  Add(&t, a.x, delta);
  for (int i = 0; i < 8; i++) {
    t[i] += t[i] << 1;
  }
  Reduce(&t);
  Subtract(&alpha, a.x, delta);
  Reduce(&alpha);
  Mul(&alpha, alpha, t);

  // Z3 = (Y1+Z1)²-gamma-delta
  Add(&out->z, a.y, a.z);
  Reduce(&out->z);
  Square(&out->z, out->z);
  Subtract(&out->z, out->z, gamma);
  Reduce(&out->z);
  Subtract(&out->z, out->z, delta);
  Reduce(&out->z);

  // X3 = alpha²-8*beta
  for (int i = 0; i < 8; i++) {
    delta[i] = beta[i] << 3;
  }
  Reduce(&delta);
  Square(&out->x, alpha);
  Subtract(&out->x, out->x, delta);
  Reduce(&out->x);

  // Y3 = alpha*(4*beta-X3)-8*gamma²
  for (int i = 0; i < 8; i++) {
    beta[i] <<= 2;
  }
  Reduce(&beta);
  Subtract(&beta, beta, out->x);
  Reduce(&beta);
  Square(&gamma, gamma);
  for (int i = 0; i < 8; i++) {
    gamma[i] <<= 3;
  }
  Reduce(&gamma);
  Mul(&out->y, alpha, beta);
  Subtract(&out->y, out->y, gamma);
  Reduce(&out->y);
}

// CopyConditional sets *out=a if mask is 0xffffffff. mask must be either 0 or
// 0xffffffff.
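//
// The per-limb update out ^= mask & (a ^ out) below is the standard
// constant-time select: with mask == 0 it leaves out untouched, and with
// mask == 0xffffffff it simplifies to out ^= (a ^ out), i.e. out = a, without
// any data-dependent branching.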
void CopyConditional(Point* out, const Point& a, uint32_t mask) {
  for (int i = 0; i < 8; i++) {
    out->x[i] ^= mask & (a.x[i] ^ out->x[i]);
    out->y[i] ^= mask & (a.y[i] ^ out->y[i]);
    out->z[i] ^= mask & (a.z[i] ^ out->z[i]);
  }
}

// ScalarMult calculates *out = a*scalar where scalar is a big-endian number of
// length scalar_len and != 0.
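//
// This is a left-to-right double-and-add ladder: for each scalar bit, most
// significant first, the accumulator is doubled and the sum a + *out is always
// computed; CopyConditional then keeps that sum only when the bit is set, so
// the sequence of field operations does not depend on the scalar's value.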
void ScalarMult(Point* out,
                const Point& a,
                const uint8_t* scalar,
                size_t scalar_len) {
  memset(out, 0, sizeof(*out));
  Point tmp;

  for (size_t i = 0; i < scalar_len; i++) {
    for (unsigned int bit_num = 0; bit_num < 8; bit_num++) {
      DoubleJacobian(out, *out);
      uint32_t bit = static_cast<uint32_t>(static_cast<int32_t>(
          (((scalar[i] >> (7 - bit_num)) & 1) << 31) >> 31));
      AddJacobian(&tmp, a, *out);
      CopyConditional(out, tmp, bit);
    }
  }
}

// Get224Bits reads 7 words from in and scatters their contents in
// little-endian form into 8 words at out, 28 bits per output word.
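//
// For example (editorial note): in[6] is the big-endian word holding the least
// significant 32 bits of the 224-bit value, so out[0] receives its low 28 bits
// and its top 4 bits become the low bits of out[1]; at the other end, out[7]
// ends up holding bits 196..223, taken from in[0].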
void Get224Bits(uint32_t* out, const uint32_t* in) {
  out[0] = NetToHost32(in[6]) & kBottom28Bits;
  out[1] = ((NetToHost32(in[5]) << 4) |
            (NetToHost32(in[6]) >> 28)) & kBottom28Bits;
  out[2] = ((NetToHost32(in[4]) << 8) |
            (NetToHost32(in[5]) >> 24)) & kBottom28Bits;
  out[3] = ((NetToHost32(in[3]) << 12) |
            (NetToHost32(in[4]) >> 20)) & kBottom28Bits;
  out[4] = ((NetToHost32(in[2]) << 16) |
            (NetToHost32(in[3]) >> 16)) & kBottom28Bits;
  out[5] = ((NetToHost32(in[1]) << 20) |
            (NetToHost32(in[2]) >> 12)) & kBottom28Bits;
  out[6] = ((NetToHost32(in[0]) << 24) |
            (NetToHost32(in[1]) >> 8)) & kBottom28Bits;
  out[7] = (NetToHost32(in[0]) >> 4) & kBottom28Bits;
}

// Put224Bits performs the inverse operation to Get224Bits: taking 28 bits from
// each of 8 input words and writing them in big-endian order to 7 words at
// out.
void Put224Bits(uint32_t* out, const uint32_t* in) {
  out[6] = HostToNet32((in[0] >> 0) | (in[1] << 28));
  out[5] = HostToNet32((in[1] >> 4) | (in[2] << 24));
  out[4] = HostToNet32((in[2] >> 8) | (in[3] << 20));
  out[3] = HostToNet32((in[3] >> 12) | (in[4] << 16));
  out[2] = HostToNet32((in[4] >> 16) | (in[5] << 12));
  out[1] = HostToNet32((in[5] >> 20) | (in[6] << 8));
  out[0] = HostToNet32((in[6] >> 24) | (in[7] << 4));
}

} // anonymous namespace

namespace crypto {

namespace p224 {

bool Point::SetFromString(const base::StringPiece& in) {
  if (in.size() != 2*28)
    return false;
  const uint32_t* inwords = reinterpret_cast<const uint32_t*>(in.data());
  Get224Bits(x, inwords);
  Get224Bits(y, inwords + 7);
  memset(&z, 0, sizeof(z));
  z[0] = 1;

  // Check that the point is on the curve, i.e. that y² = x³ - 3x + b.
  FieldElement lhs;
  Square(&lhs, y);
  Contract(&lhs);

  FieldElement rhs;
  Square(&rhs, x);
  Mul(&rhs, x, rhs);

  FieldElement three_x;
  for (int i = 0; i < 8; i++) {
    three_x[i] = x[i] * 3;
  }
  Reduce(&three_x);
  Subtract(&rhs, rhs, three_x);
  Reduce(&rhs);

  ::Add(&rhs, rhs, kB);
  Contract(&rhs);
  return memcmp(&lhs, &rhs, sizeof(lhs)) == 0;
}

std::string Point::ToString() const {
  FieldElement zinv, zinv_sq, xx, yy;

  // If this is the point at infinity we return a string of all zeros.
  if (IsZero(this->z)) {
    static const char zeros[56] = {0};
    return std::string(zeros, sizeof(zeros));
  }

  Invert(&zinv, this->z);
  Square(&zinv_sq, zinv);
  Mul(&xx, x, zinv_sq);
  Mul(&zinv_sq, zinv_sq, zinv);
  Mul(&yy, y, zinv_sq);

  Contract(&xx);
  Contract(&yy);

  uint32_t outwords[14];
  Put224Bits(outwords, xx);
  Put224Bits(outwords + 7, yy);
  return std::string(reinterpret_cast<const char*>(outwords), sizeof(outwords));
}

void ScalarMult(const Point& in, const uint8_t* scalar, Point* out) {
  ::ScalarMult(out, in, scalar, 28);
}

// kBasePoint is the base point (generator) of the elliptic curve group.
static const Point kBasePoint = {
  {22813985, 52956513, 34677300, 203240812,
   12143107, 133374265, 225162431, 191946955},
  {83918388, 223877528, 122119236, 123340192,
   266784067, 263504429, 146143011, 198407736},
  {1, 0, 0, 0, 0, 0, 0, 0},
};

void ScalarBaseMult(const uint8_t* scalar, Point* out) {
  ::ScalarMult(out, kBasePoint, scalar, 28);
}
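
// Editorial usage sketch (not part of the original file): the public API above
// composes into a Diffie-Hellman style exchange. The 28-byte buffer and the
// peer_public variable are hypothetical caller state; the peer's point would
// arrive over the wire and be parsed with Point::SetFromString.
//
//   uint8_t my_secret[28];            // filled with random bytes, != 0
//   crypto::p224::Point my_public, peer_public, shared;
//   crypto::p224::ScalarBaseMult(my_secret, &my_public);
//   // send my_public.ToString() to the peer, receive theirs into peer_public
//   crypto::p224::ScalarMult(peer_public, my_secret, &shared);
//   std::string shared_bytes = shared.ToString();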

void Add(const Point& a, const Point& b, Point* out) {
  AddJacobian(out, a, b);
}

void Negate(const Point& in, Point* out) {
  // Guide to elliptic curve cryptography, page 89 suggests that (X : X+Y : Z)
  // is the negative in Jacobian coordinates, but it doesn't actually appear to
  // be true in testing so this performs the negation in affine coordinates.
  FieldElement zinv, zinv_sq, y;
  Invert(&zinv, in.z);
  Square(&zinv_sq, zinv);
  Mul(&out->x, in.x, zinv_sq);
  Mul(&zinv_sq, zinv_sq, zinv);
  Mul(&y, in.y, zinv_sq);

  Subtract(&out->y, kP, y);
  Reduce(&out->y);

  memset(&out->z, 0, sizeof(out->z));
  out->z[0] = 1;
}

} // namespace p224

} // namespace crypto