/* Copyright 2016 Brian Smith.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include <openssl/bn.h>

#include <assert.h>

#include "internal.h"
#include "../internal.h"


static uint64_t bn_neg_inv_mod_r_u64(uint64_t n);

OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
                       BN_MONT_CTX_N0_LIMBS_VALUE_INVALID);
OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) ==
                           BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
                       BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);

/* LG_LITTLE_R is log_2(r). */
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)
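
/* The compile-time asserts above imply that, in both supported configurations
 * (one 64-bit limb, or two 32-bit limbs), LG_LITTLE_R is 64 and r == 2**64. */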

uint64_t bn_mont_n0(const BIGNUM *n) {
  /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */
  assert(!BN_is_zero(n));
  assert(!BN_is_negative(n));
  assert(BN_is_odd(n));

  /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
   * ensures that we can do integer division by |r| by simply ignoring
   * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
   * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
   * what makes Montgomery multiplication efficient.
   *
   * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
   * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
   * multi-limb Montgomery multiplication of |a * b (mod n)|, given the
   * unreduced product |t == a * b|, we repeatedly calculate:
   *
   *    t1 := t % r       |t1| is |t|'s lowest limb (see previous paragraph).
   *    t2 := t1*n0*n
   *    t3 := t + t2
   *    t  := t3 / r      copy all limbs of |t3| except the lowest to |t|.
   *
   * In the last step, it would only make sense to ignore the lowest limb of
   * |t3| if it were zero. The middle steps ensure that this is the case:
   *
   *                  t3 ==  0   (mod r)
   *              t + t2 ==  0   (mod r)
   *         t + t1*n0*n ==  0   (mod r)
   *             t1*n0*n == -t   (mod r)
   *              t*n0*n == -t   (mod r)
   *                n0*n == -1   (mod r)
   *                  n0 == -1/n (mod r)
   *
   * Thus, in each iteration of the loop, we multiply by the constant factor
   * |n0|, the negative inverse of n (mod r). */
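
  /* A small worked example of one iteration of that loop, with toy values
   * (illustrative only; the real |r| is 2**64): let r == 16 and n == 13, so
   * n0 == -1/13 == 11 (mod 16), since 13*11 == 143 == 9*16 - 1. For t == 100:
   *
   *    t1 := 100 % 16    == 4
   *    t2 := 4*11*13     == 572
   *    t3 := 100 + 572   == 672 == 42*16, so t3 == 0 (mod 16)
   *    t  := 672 / 16    == 42, and indeed 42 == 100 * 16**-1 == 3 (mod 13).
   */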

  /* n_mod_r = n % r. As explained above, this is done by taking the lowest
   * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */
  uint64_t n_mod_r = n->d[0];
#if BN_MONT_CTX_N0_LIMBS == 2
  if (n->top > 1) {
    n_mod_r |= (uint64_t)n->d[1] << BN_BITS2;
  }
#endif

  return bn_neg_inv_mod_r_u64(n_mod_r);
}
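
/* A sketch of how a caller might consume the result (hypothetical; the field
 * names |mont->n0| are an assumption about the caller's |BN_MONT_CTX|):
 *
 *   uint64_t n0 = bn_mont_n0(n);
 *   mont->n0[0] = (BN_ULONG)n0;
 * #if BN_MONT_CTX_N0_LIMBS == 2
 *   mont->n0[1] = (BN_ULONG)(n0 >> BN_BITS2);
 * #endif
 */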

/* bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v| such
 * that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| must
 * be odd.
 *
 * This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
 * Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
 * It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
 * Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
 * (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
 *
 * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
 * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
 * constant-time with respect to |n|. We assume uint64_t additions,
 * subtractions, shifts, and bitwise operations are all constant time, which
 * may be a large leap of faith on 32-bit targets. We avoid division and
 * multiplication, which tend to be the most problematic in terms of timing
 * leaks.
 *
 * Most GCD implementations return values such that |u*r + v*n == 1|, so the
 * caller would have to negate the resultant |v| for the purpose of Montgomery
 * multiplication. This implementation does the negation implicitly by doing
 * the computations as a difference instead of a sum. */
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
  assert(n % 2 == 1);

  /* alpha == 2**(lg r - 1) == r / 2. */
  static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);

  const uint64_t beta = n;

  uint64_t u = 1;
  uint64_t v = 0;

  /* The invariant maintained from here on is:
   *   2**(lg r - i) == u*2*alpha - v*beta. */
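  /* At |i| == 0 this holds trivially, since 2*alpha == r:
   *   u*2*alpha - v*beta == 1*r - 0*beta == 2**lg(r). */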
  for (size_t i = 0; i < LG_LITTLE_R; ++i) {
#if BN_BITS2 == 64 && defined(BN_ULLONG)
    assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
           ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif

    /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
     * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */
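    /* Note that |v| is even here whenever the invariant holds: |u*2*alpha|
     * and |2**(lg r - i)| are both even while |beta| is odd, so |v*beta|, and
     * hence |v|, must be even. The |v >> 1| below therefore loses no bits. */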

    uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */

    /* The addition can overflow, so use Dietz's method for it.
     *
     * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
     * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
     * (embedded in 64 bits so that overflow can be ignored):
     *
     * (declare-fun x () (_ BitVec 64))
     * (declare-fun y () (_ BitVec 64))
     * (assert (let (
     *     (one (_ bv1 64))
     *     (thirtyTwo (_ bv32 64)))
     *   (and
     *     (bvult x (bvshl one thirtyTwo))
     *     (bvult y (bvshl one thirtyTwo))
     *     (not (=
     *       (bvadd (bvlshr (bvxor x y) one) (bvand x y))
     *       (bvlshr (bvadd x y) one)))
     * )))
     * (check-sat) */
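    /* For example, with hypothetical 8-bit values: x == 0xff and y == 0x01
     * would overflow to 0x00 when added in 8 bits, yet
     * ((x ^ y) >> 1) + (x & y) == (0xfe >> 1) + 0x01 == 0x80 == 0x100 / 2. */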
    uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */
    u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);

    uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */
    v = (v >> 1) + alpha_if_u_is_odd;
  }

  /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */
#if BN_BITS2 == 64 && defined(BN_ULLONG)
  assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif

  return v;
}
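
/* A minimal self-check sketch (hypothetical; not part of this file): since
 * r == 2**64 here, the result |v| must satisfy v*n == -1 under uint64_t
 * wraparound. For example, -1/7 (mod 2**64) is (2**66 - 1)/7 ==
 * 0x9249249249249249, so:
 *
 *   assert(bn_neg_inv_mod_r_u64(7) * UINT64_C(7) == UINT64_MAX);
 */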

/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
 * than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
 * odd. */
int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
  assert(!BN_is_zero(n));
  assert(!BN_is_negative(n));
  assert(BN_is_odd(n));

  BN_zero(r);

  unsigned n_bits = BN_num_bits(n);
  assert(n_bits != 0);
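  /* If |n_bits| == 1 then |n| == 1, since |n| is odd, and 2**p == 0 (mod 1),
   * which is the value |r| already holds. */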
  if (n_bits == 1) {
    return 1;
  }

  /* Set |r| to the smallest power of two larger than |n|. */
  assert(p > n_bits);
  if (!BN_set_bit(r, n_bits)) {
    return 0;
  }

  /* Unconditionally reduce |r|. */
  assert(BN_cmp(r, n) > 0);
  if (!BN_usub(r, r, n)) {
    return 0;
  }
  assert(BN_cmp(r, n) < 0);

  for (unsigned i = n_bits; i < p; ++i) {
    /* This is like |BN_mod_lshift1_quick| except using |BN_usub|.
     *
     * TODO: Replace this with the use of a constant-time variant of
     * |BN_mod_lshift1_quick|. */
    if (!BN_lshift1(r, r)) {
      return 0;
    }
    if (BN_cmp(r, n) >= 0) {
      if (!BN_usub(r, r, n)) {
        return 0;
      }
    }
  }

  return 1;
}
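
/* Example use (a sketch; per the comment in |bn_mont_n0|, the real caller is
 * |BN_MONT_CTX_set|, and the rounding formula below is an assumption about
 * it): computing RR == 2**(2 * lg(R)) (mod n) for conversion into the
 * Montgomery domain, where lg(R) is |BN_num_bits(n)| rounded up to a multiple
 * of |BN_BITS2|:
 *
 *   unsigned lg_big_r =
 *       (BN_num_bits(n) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
 *   if (!bn_mod_exp_base_2_vartime(rr, 2 * lg_big_r, n)) {
 *     return 0;
 *   }
 */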