1 /* Copyright 2016 Brian Smith.
2 *
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
14
15 #include "../../limbs/limbs.h"
16
17 #include "ecp_nistz384.h"
18 #include "../bn/internal.h"
19 #include "../../internal.h"
20
21 #include "../../limbs/limbs.inl"
22
23 /* XXX: Here we assume that the conversion from |Carry| to |Limb| is
24 * constant-time, but we haven't verified that assumption. TODO: Fix it so
25 * we don't need to make that assumption. */
26
27
/* An element of the P-384 field, as |P384_LIMBS| limbs in little-endian limb
 * order (least significant limb first; see the limb order of |Q| below). */
typedef Limb Elem[P384_LIMBS];
/* A scalar (mod n) in the Montgomery domain. */
typedef Limb ScalarMont[P384_LIMBS];
/* A scalar (mod n) in the ordinary (non-Montgomery) domain. */
typedef Limb Scalar[P384_LIMBS];
31
32
/* The P-384 field prime, q = 2**384 - 2**128 - 2**96 + 2**32 - 1, in
 * little-endian limb order. */
static const BN_ULONG Q[P384_LIMBS] = {
  TOBN(0x00000000, 0xffffffff),
  TOBN(0xffffffff, 0x00000000),
  TOBN(0xffffffff, 0xfffffffe),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
};
41
/* The order n of the P-384 group, in little-endian limb order. */
static const BN_ULONG N[P384_LIMBS] = {
  TOBN(0xecec196a, 0xccc52973),
  TOBN(0x581a0db2, 0x48b0a77a),
  TOBN(0xc7634d81, 0xf4372ddf),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
};
50
51
/* 1 in the Montgomery domain of q, i.e. R mod q where R = 2**384. Since
 * q = 2**384 - 2**128 - 2**96 + 2**32 - 1, this is
 * 2**128 + 2**96 - 2**32 + 1. */
static const BN_ULONG ONE[P384_LIMBS] = {
  TOBN(0xffffffff, 1), TOBN(0, 0xffffffff), TOBN(0, 1), TOBN(0, 0), TOBN(0, 0),
  TOBN(0, 0),
};
56
57
/* XXX: MSVC for x86 warns when it fails to inline these functions it should
 * probably inline. Force inlining there; elsewhere, a plain |inline| hint is
 * sufficient. */
#if defined(_MSC_VER) && !defined(__clang__) && defined(OPENSSL_X86)
#define INLINE_IF_POSSIBLE __forceinline
#else
#define INLINE_IF_POSSIBLE inline
#endif
65
/* Returns a constant-time truth value: nonzero iff |a| == |b| limb-wise. */
static inline Limb is_equal(const Elem a, const Elem b) {
  return LIMBS_equal(a, b, P384_LIMBS);
}
69
/* Returns a constant-time truth value: nonzero iff every limb of |a| is 0. */
static inline Limb is_zero(const BN_ULONG a[P384_LIMBS]) {
  return LIMBS_are_zero(a, P384_LIMBS);
}
73
/* Constant-time conditional copy: sets |r| to |a| when |condition| is the
 * all-ones/true mask value, otherwise leaves |r| unchanged. Never branches on
 * |condition|. */
static inline void copy_conditional(Elem r, const Elem a,
                                    const Limb condition) {
  size_t idx;
  for (idx = 0; idx < P384_LIMBS; ++idx) {
    Limb chosen = constant_time_select_w(condition, a[idx], r[idx]);
    r[idx] = chosen;
  }
}
80
81
/* r = (a + b) mod q, fully reduced. */
static inline void elem_add(Elem r, const Elem a, const Elem b) {
  LIMBS_add_mod(r, a, b, Q, P384_LIMBS);
}
85
/* r = (a - b) mod q, fully reduced. */
static inline void elem_sub(Elem r, const Elem a, const Elem b) {
  LIMBS_sub_mod(r, a, b, Q, P384_LIMBS);
}
89
/* r = a / 2 (mod q), computed in constant time. */
static void elem_div_by_2(Elem r, const Elem a) {
  /* Consider the case where `a` is even. Then we can shift `a` right one bit
   * and the result will still be valid because we didn't lose any bits and so
   * `(a >> 1) * 2 == a (mod q)`, which is the invariant we must satisfy.
   *
   * The remainder of this comment is considering the case where `a` is odd.
   *
   * Since `a` is odd, it isn't the case that `(a >> 1) * 2 == a (mod q)`
   * because the lowest bit is lost during the shift. For example, consider:
   *
   * ```python
   * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
   * a = 2**383
   * two_a = a * 2 % q
   * assert two_a == 0x100000000ffffffffffffffff00000001
   * ```
   *
   * Notice there how `(2 * a) % q` wrapped around to a smaller odd value. When
   * we divide `two_a` by two (mod q), we need to get the value `2**383`, which
   * we obviously can't get with just a right shift.
   *
   * `q` is odd, and `a` is odd, so `a + q` is even. We could calculate
   * `(a + q) >> 1` and then reduce it mod `q`. However, then we would have to
   * keep track of an extra most significant bit. We can avoid that by instead
   * calculating `(a >> 1) + ((q + 1) >> 1)`. The `1` in `q + 1` is the least
   * significant bit of `a`. `q + 1` is even, which means it can be shifted
   * without losing any bits. Since `q` is odd, `q - 1` is even, so the largest
   * odd field element is `q - 2`. Thus we know that `a <= q - 2`. We know
   * `(q + 1) >> 1` is `(q + 1) / 2` since (`q + 1`) is even. The value of
   * `a >> 1` is `(a - 1)/2` since the shift will drop the least significant
   * bit of `a`, which is 1. Thus:
   *
   * sum  =  ((q + 1) >> 1) + (a >> 1)
   * sum  =  (q + 1)/2 + (a >> 1)       (substituting (q + 1)/2)
   *     <=  (q + 1)/2 + (q - 2 - 1)/2  (substituting a <= q - 2)
   *     <=  (q + 1)/2 + (q - 3)/2      (simplifying)
   *     <=  (q + 1 + q - 3)/2          (factoring out the common divisor)
   *     <=  (2q - 2)/2                 (simplifying)
   *     <=  q - 1                      (simplifying)
   *
   * Thus, no reduction of the sum mod `q` is necessary. */

  Limb is_odd = constant_time_is_nonzero_w(a[0] & 1);

  /* r = a >> 1, working from the most significant limb down. */
  Limb carry = a[P384_LIMBS - 1] & 1;
  r[P384_LIMBS - 1] = a[P384_LIMBS - 1] >> 1;
  for (size_t i = 1; i < P384_LIMBS; ++i) {
    /* Only bit 0 of |new_carry| matters; the shift by LIMB_BITS - 1 below
     * discards the rest. */
    Limb new_carry = a[P384_LIMBS - i - 1];
    r[P384_LIMBS - i - 1] =
        (a[P384_LIMBS - i - 1] >> 1) | (carry << (LIMB_BITS - 1));
    carry = new_carry;
  }

  /* (q + 1) / 2, precomputed; see the proof above for why adding this to
   * `a >> 1` cannot overflow or need reduction. */
  static const Elem Q_PLUS_1_SHR_1 = {
    TOBN(0x00000000, 0x80000000), TOBN(0x7fffffff, 0x80000000),
    TOBN(0xffffffff, 0xffffffff), TOBN(0xffffffff, 0xffffffff),
    TOBN(0xffffffff, 0xffffffff), TOBN(0x7fffffff, 0xffffffff),
  };

  Elem adjusted;
  BN_ULONG carry2 = limbs_add(adjusted, r, Q_PLUS_1_SHR_1, P384_LIMBS);
  dev_assert_secret(carry2 == 0);
  (void)carry2;
  /* Use the adjusted value only in the odd case, without branching. */
  copy_conditional(r, adjusted, is_odd);
}
156
/* r = (a * b) / R (mod q) — Montgomery multiplication in the field. */
static inline void elem_mul_mont(Elem r, const Elem a, const Elem b) {
  /* n0 for Montgomery reduction mod q; presumably -q^-1 mod 2**LIMB_BITS —
   * TODO(review): confirm against |BN_MONT_CTX_N0|'s definition. */
  static const BN_ULONG Q_N0[] = {
    BN_MONT_CTX_N0(0x1, 0x1)
  };
  /* XXX: Not (clearly) constant-time; inefficient.*/
  GFp_bn_mul_mont(r, a, b, Q, Q_N0, P384_LIMBS);
}
164
/* r = (2 * a) mod q, via a modular left shift. */
static inline void elem_mul_by_2(Elem r, const Elem a) {
  LIMBS_shl_mod(r, a, Q, P384_LIMBS);
}
168
elem_mul_by_3(Elem r,const Elem a)169 static INLINE_IF_POSSIBLE void elem_mul_by_3(Elem r, const Elem a) {
170 /* XXX: inefficient. TODO: Replace with an integrated shift + add. */
171 Elem doubled;
172 elem_add(doubled, a, a);
173 elem_add(r, doubled, a);
174 }
175
/* r = (a * a) / R (mod q) — Montgomery squaring.
 * XXX: Inefficient. TODO: Add a dedicated squaring routine. */
static inline void elem_sqr_mont(Elem r, const Elem a) {
  elem_mul_mont(r, a, a);
}
180
/* Exported wrapper: r = (a + b) mod q. */
void GFp_p384_elem_add(Elem r, const Elem a, const Elem b) {
  elem_add(r, a, b);
}
184
/* Exported wrapper: r = (a - b) mod q. */
void GFp_p384_elem_sub(Elem r, const Elem a, const Elem b) {
  elem_sub(r, a, b);
}
188
/* Exported wrapper: r = a / 2 (mod q). */
void GFp_p384_elem_div_by_2(Elem r, const Elem a) {
  elem_div_by_2(r, a);
}
192
/* Exported wrapper: r = (a * b) / R (mod q) — Montgomery multiplication. */
void GFp_p384_elem_mul_mont(Elem r, const Elem a, const Elem b) {
  elem_mul_mont(r, a, b);
}
196
/* r = -a (mod q), in constant time. Negation is q - a, except that -0 must be
 * 0 (q - 0 would yield q, which is not a reduced element), so the result is
 * forced to zero when |a| is zero, without branching on secret data. */
void GFp_p384_elem_neg(Elem r, const Elem a) {
  /* Renamed from |is_zero| to avoid shadowing the file-scope |is_zero|
   * helper, which is used here instead of a direct |LIMBS_are_zero| call for
   * consistency with the rest of the file. */
  Limb a_is_zero = is_zero(a);
  Carry borrow = limbs_sub(r, Q, a, P384_LIMBS);
  dev_assert_secret(borrow == 0); /* a < q, so q - a cannot borrow. */
  (void)borrow;
  for (size_t i = 0; i < P384_LIMBS; ++i) {
    r[i] = constant_time_select_w(a_is_zero, 0, r[i]);
  }
}
206
207
/* r = (a * b) / R (mod n) — Montgomery multiplication in the scalar group. */
void GFp_p384_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                              const ScalarMont b) {
  /* n0 for Montgomery reduction mod n; presumably -n^-1 mod 2**LIMB_BITS —
   * TODO(review): confirm against |BN_MONT_CTX_N0|'s definition. */
  static const BN_ULONG N_N0[] = {
    BN_MONT_CTX_N0(0x6ed46089, 0xe88fdc45)
  };
  /* XXX: Inefficient. TODO: Add dedicated multiplication routine. */
  GFp_bn_mul_mont(r, a, b, N, N_N0, P384_LIMBS);
}
216
217
218 /* TODO(perf): Optimize this. */
219
/* Constant-time table lookup: copies table[index - 1] into |out| without any
 * secret-dependent memory access pattern — every entry of |table| is read
 * regardless of |index|. When |index| is 0 (matching no entry), |out| is set
 * to all-zero limbs. */
static void gfp_p384_point_select_w5(P384_POINT *out,
                                     const P384_POINT table[16], size_t index) {
  Elem x; limbs_zero(x, P384_LIMBS);
  Elem y; limbs_zero(y, P384_LIMBS);
  Elem z; limbs_zero(z, P384_LIMBS);

  // TODO: Rewrite in terms of |limbs_select|.
  for (size_t i = 0; i < 16; ++i) {
    // |equal| is an all-ones mask only for the entry where i + 1 == index.
    crypto_word equal = constant_time_eq_w(index, (crypto_word)i + 1);
    for (size_t j = 0; j < P384_LIMBS; ++j) {
      x[j] = constant_time_select_w(equal, table[i].X[j], x[j]);
      y[j] = constant_time_select_w(equal, table[i].Y[j], y[j]);
      z[j] = constant_time_select_w(equal, table[i].Z[j], z[j]);
    }
  }

  limbs_copy(out->X, x, P384_LIMBS);
  limbs_copy(out->Y, y, P384_LIMBS);
  limbs_copy(out->Z, z, P384_LIMBS);
}
240
241
242 #include "ecp_nistz384.inl"
243