github.com/aquanetwork/aquachain@v1.7.8/crypto/secp256k1/libsecp256k1/src/ecmult_const_impl.h

/**********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_ECMULT_CONST_IMPL_H
#define SECP256K1_ECMULT_CONST_IMPL_H

#include "scalar.h"
#include "group.h"
#include "ecmult_const.h"
#include "ecmult_impl.h"

/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
    int m; \
    int abs_n = (n) * (((n) > 0) * 2 - 1); \
    int idx_n = abs_n / 2; \
    secp256k1_fe neg_y; \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
    for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \
        /* This loop is used to avoid secret data in array indices. See
         * the comment in ecmult_gen_impl.h for rationale. */ \
        secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
        secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
    } \
    (r)->infinity = 0; \
    secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
    secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)


/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{w*i} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
 *  - each wnaf[i] is nonzero
 *  - the number of words set is always WNAF_SIZE(w) + 1
 *
 *  Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
 *  Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.)
 *  CT-RSA 2003, LNCS 2612, pp. 328-343, 2003. Springer-Verlag Berlin Heidelberg 2003
 *
 *  Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335
 */
static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
    int global_sign;
    int skew = 0;
    int word = 0;

    /* 1 2 3 */
    int u_last;
    int u;

    int flip;
    int bit;
    secp256k1_scalar neg_s;
    int not_neg_one;
    /* Note that we cannot handle even numbers by negating them to be odd, as is
     * done in other implementations, since if our scalars were specified to have
     * width < 256 for performance reasons, their negations would have width 256
     * and we'd lose any performance benefit. Instead, we use a technique from
     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
     * or 2 (for odd) to the number we are encoding, returning a skew value indicating
     * this, and having the caller compensate after doing the multiplication.
     *
     * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
     * particular, to ensure that the outputs from the endomorphism-split fit into
     * 128 bits). If we negate, the parity of our number flips, inverting which of
     * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
     * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
     * we need to special-case it in this logic. */
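    /* Illustrative worked example of the skew handling below (not a normative
     * statement, just a small trace assuming no negation is needed, i.e. flip == 0):
     *   s = 6 (even): bit = 0, so 1 is added -> s = 7, skew = 1 << 0 = 1.
     *                 The caller later subtracts 1*a, recovering 6*a.
     *   s = 7 (odd):  bit = 1, so 2 is added -> s = 9, skew = 1 << 1 = 2.
     *                 The caller later subtracts 2*a, recovering 7*a.
     * Either way the encoded scalar becomes odd, as the width-w NAF algorithm
     * requires, and the skew value (always 1 or 2) tells the caller how to
     * compensate after the multiplication. */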
    flip = secp256k1_scalar_is_high(&s);
    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
    bit = flip ^ !secp256k1_scalar_is_even(&s);
    /* We check for negative one, since adding 2 to it will cause an overflow */
    secp256k1_scalar_negate(&neg_s, &s);
    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
     * that we added two to it and flipped it. In fact for -1 these operations are
     * identical. We only flipped, but since skewing is required (in the sense that
     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
     * our flags to claim that we only skewed. */
    global_sign = secp256k1_scalar_cond_negate(&s, flip);
    global_sign *= not_neg_one * 2 - 1;
    skew = 1 << bit;

    /* 4 */
    u_last = secp256k1_scalar_shr_int(&s, w);
    while (word * w < size) {
        int sign;
        int even;

        /* 4.1 4.4 */
        u = secp256k1_scalar_shr_int(&s, w);
        /* 4.2 */
        even = ((u & 1) == 0);
        sign = 2 * (u_last > 0) - 1;
        u += sign * even;
        u_last -= sign * even * (1 << w);

        /* 4.3, adapted for global sign change */
        wnaf[word++] = u_last * global_sign;

        u_last = u;
    }
    wnaf[word] = u * global_sign;

    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
    VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
    return skew;
}

static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge tmpa;
    secp256k1_fe Z;

    int skew_1;
#ifdef USE_ENDOMORPHISM
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
    int skew_lam;
    secp256k1_scalar q_1, q_lam;
#endif
    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];

    int i;
    secp256k1_scalar sc = *scalar;

    /* build wnaf representation for q. */
    int rsize = size;
#ifdef USE_ENDOMORPHISM
    if (size > 128) {
        rsize = 128;
        /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
        skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
        skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
    } else
#endif
    {
        skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
#ifdef USE_ENDOMORPHISM
        skew_lam = 0;
#endif
    }

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     */
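    /* Illustrative note on the global-Z trick mentioned above: the helper below
     * returns table entries whose true Jacobian coordinates all share one
     * denominator Z, i.e. the i-th precomputed point is really
     * (pre_a[i].x / Z^2, pre_a[i].y / Z^3). Treating the stored (x, y) pairs as
     * affine points amounts to working on an isomorphic curve, so the affine
     * addition formulae remain valid; multiplying r->z by Z after the main loop
     * (see below) maps the result back to the original curve. */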
    secp256k1_gej_set_ge(r, a);
    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
        secp256k1_fe_normalize_weak(&pre_a[i].y);
    }
#ifdef USE_ENDOMORPHISM
    if (size > 128) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
        }
    }
#endif

    /* first loop iteration (separated out so we can directly set r, rather
     * than having it start at infinity, get doubled several times, then have
     * its new value added to it) */
    i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);
#ifdef USE_ENDOMORPHISM
    if (size > 128) {
        i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
        VERIFY_CHECK(i != 0);
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
        secp256k1_gej_add_ge(r, r, &tmpa);
    }
#endif
    /* remaining loop iterations */
    for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
        int n;
        int j;
        for (j = 0; j < WINDOW_A - 1; ++j) {
            secp256k1_gej_double_nonzero(r, r, NULL);
        }

        n = wnaf_1[i];
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        VERIFY_CHECK(n != 0);
        secp256k1_gej_add_ge(r, r, &tmpa);
#ifdef USE_ENDOMORPHISM
        if (size > 128) {
            n = wnaf_lam[i];
            ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
            VERIFY_CHECK(n != 0);
            secp256k1_gej_add_ge(r, r, &tmpa);
        }
#endif
    }

    secp256k1_fe_mul(&r->z, &r->z, &Z);

    {
        /* Correct for wNAF skew */
        secp256k1_ge correction = *a;
        secp256k1_ge_storage correction_1_stor;
#ifdef USE_ENDOMORPHISM
        secp256k1_ge_storage correction_lam_stor;
#endif
        secp256k1_ge_storage a2_stor;
        secp256k1_gej tmpj;
        secp256k1_gej_set_ge(&tmpj, &correction);
        secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
        secp256k1_ge_set_gej(&correction, &tmpj);
        secp256k1_ge_to_storage(&correction_1_stor, a);
#ifdef USE_ENDOMORPHISM
        if (size > 128) {
            secp256k1_ge_to_storage(&correction_lam_stor, a);
        }
#endif
        secp256k1_ge_to_storage(&a2_stor, &correction);

        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
        secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
#ifdef USE_ENDOMORPHISM
        if (size > 128) {
            secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
        }
#endif

        /* Apply the correction */
        secp256k1_ge_from_storage(&correction, &correction_1_stor);
        secp256k1_ge_neg(&correction, &correction);
        secp256k1_gej_add_ge(r, r, &correction);

#ifdef USE_ENDOMORPHISM
        if (size > 128) {
            secp256k1_ge_from_storage(&correction, &correction_lam_stor);
            secp256k1_ge_neg(&correction, &correction);
            secp256k1_ge_mul_lambda(&correction, &correction);
            secp256k1_gej_add_ge(r, r, &correction);
        }
#endif
    }
}

#endif /* SECP256K1_ECMULT_CONST_IMPL_H */
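/* Illustrative usage sketch (not part of the library itself). secp256k1_ecmult_const
 * is an internal function; a caller inside the library (for example the ECDH module)
 * looks roughly like this:
 *
 *     secp256k1_gej res;
 *     secp256k1_ge pt;      point to multiply, already validated and not infinity
 *     secp256k1_scalar q;   secret scalar, already reduced modulo the group order
 *     secp256k1_ecmult_const(&res, &pt, &q, 256);    res = q*pt, in constant time
 *
 * The final argument bounds the bit length of the scalar; 256 is the conservative
 * choice for a full-width secret key, and values above 128 enable the
 * endomorphism-split path when USE_ENDOMORPHISM is defined. */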