github.com/ethereum/go-ethereum@v1.16.1/crypto/secp256k1/libsecp256k1/src/ecmult_impl.h

/******************************************************************************
 * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
 * Distributed under the MIT software license, see the accompanying          *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.      *
 ******************************************************************************/

#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

#include <string.h>
#include <stdint.h>

#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
#include "precomputed_ecmult.h"

#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need to lower these values for exhaustive tests because
 * the tables cannot have infinities in them (this breaks the
 * affine-isomorphism stuff which tracks z-ratios) */
# if EXHAUSTIVE_TEST_ORDER > 128
#  define WINDOW_A 5
# elif EXHAUSTIVE_TEST_ORDER > 8
#  define WINDOW_A 4
# else
#  define WINDOW_A 2
# endif
#else
/* optimal for 128-bit and 256-bit exponents. */
# define WINDOW_A 5
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
 *  performance at the cost of an exponentially larger precomputed
 *  table. The exact table size is
 *      (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
 *  where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
 *  be larger due to platform-specific padding and alignment.
 *  Two tables of this size are used (due to the endomorphism
 *  optimization).
 */
#endif
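/* Illustrative arithmetic for the table-size formula above (a sketch, assuming
 * WINDOW_G, i.e. ECMULT_WINDOW_SIZE, is 15 and sizeof(secp256k1_ge_storage) is
 * the typical 64 bytes):
 *     entries   = 1 << (15 - 2) = 8192
 *     one table = 8192 * 64 bytes = 512 KiB
 * so the two tables together occupy about 1 MiB, and every increment of
 * ECMULT_WINDOW_SIZE doubles that cost.
 */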
#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) CEIL_DIV(bits, w)
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

/* The number of objects allocated on the scratch space for ecmult_multi algorithms */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#define ECMULT_PIPPENGER_THRESHOLD 88

#define ECMULT_MAX_POINTS_PER_BATCH 5000000

/** Fill a table 'pre_a' with precomputed odd multiples of a.
 *  pre_a will contain [1*a,3*a,...,(2*n-1)*a], so it needs space for n group elements.
 *  zr needs space for n field elements.
 *
 *  Although pre_a is an array of _ge rather than _gej, it actually represents elements
 *  in Jacobian coordinates with their z coordinates omitted. The omitted z-coordinates
 *  can be recovered using z and zr. Using the notation z(b) to represent the omitted
 *  z coordinate of b:
 *  - z(pre_a[n-1]) = 'z'
 *  - z(pre_a[i-1]) = z(pre_a[i]) / zr[i] for n > i > 0
 *
 *  Lastly the zr[0] value, which isn't used above, is set so that:
 *  - a.z = z(pre_a[0]) / zr[0]
 */
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_ge *pre_a, secp256k1_fe *zr, secp256k1_fe *z, const secp256k1_gej *a) {
    secp256k1_gej d, ai;
    secp256k1_ge d_ge;
    int i;

    VERIFY_CHECK(!a->infinity);

    secp256k1_gej_double_var(&d, a, NULL);

    /*
     * Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z.
     * The isomorphism, phi, maps a secp256k1 point (x, y) to the point (x*C^2, y*C^3) on the other curve.
     * In Jacobian coordinates phi maps (x, y, z) to (x*C^2, y*C^3, z) or, equivalently to (x, y, z/C).
     *
     *     phi(x, y, z) = (x*C^2, y*C^3, z) = (x, y, z/C)
     *     d_ge := phi(d) = (d.x, d.y, 1)
     *     ai   := phi(a) = (a.x*C^2, a.y*C^3, a.z)
     *
     * The group addition functions work correctly on these isomorphic curves.
     * In particular phi(d) is easy to represent in affine coordinates under this isomorphism.
     * This lets us use the faster secp256k1_gej_add_ge_var group addition function that we wouldn't be able to use otherwise.
     */
    secp256k1_ge_set_xy(&d_ge, &d.x, &d.y);
    secp256k1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
    secp256k1_gej_set_ge(&ai, &pre_a[0]);
    ai.z = a->z;

    /* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a.
     * Set zr[0] to C, which is the ratio between the omitted z(pre_a[0]) value and a.z.
     */
    zr[0] = d.z;

    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
        secp256k1_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
    }

    /* Multiply the last z-coordinate by C to undo the isomorphism.
     * Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios,
     * undoing the isomorphism here undoes the isomorphism for all pre_a values.
     */
    secp256k1_fe_mul(z, &ai.z, &d.z);
}

SECP256K1_INLINE static void secp256k1_ecmult_table_verify(int n, int w) {
    (void)n;
    (void)w;
    VERIFY_CHECK(((n) & 1) == 1);
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1));
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge(secp256k1_ge *r, const secp256k1_ge *pre, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        *r = pre[(n-1)/2];
    } else {
        *r = pre[(-n-1)/2];
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge_lambda(secp256k1_ge *r, const secp256k1_ge *pre, const secp256k1_fe *x, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        secp256k1_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
    } else {
        secp256k1_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge_storage(secp256k1_ge *r, const secp256k1_ge_storage *pre, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        secp256k1_ge_from_storage(r, &pre[(n-1)/2]);
    } else {
        secp256k1_ge_from_storage(r, &pre[(-n-1)/2]);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}
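/* Illustrative example of the table indexing above (a sketch, assuming
 * WINDOW_A == 5, so the table holds the 8 odd multiples [1*a, 3*a, ..., 15*a]):
 *     n =  7  ->  *r = pre[(7-1)/2]  = pre[3] = 7*a
 *     n = -7  ->  *r = pre[(7-1)/2] with y negated, i.e. -(7*a)
 * Negative multiples are never stored; they are derived on the fly by negating
 * the y coordinate, which is cheap in these coordinate systems.
 */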
/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
 *  with the following guarantees:
 *  - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1)
 *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
 *  - the returned value is one more than the index of the highest non-zero entry. It is at most 256,
 *    and at most one more than the number of bits in the absolute value of the input.
 */
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
    secp256k1_scalar s;
    int last_set_bit = -1;
    int bit = 0;
    int sign = 1;
    int carry = 0;

    VERIFY_CHECK(wnaf != NULL);
    VERIFY_CHECK(0 <= len && len <= 256);
    VERIFY_CHECK(a != NULL);
    VERIFY_CHECK(2 <= w && w <= 31);

    for (bit = 0; bit < len; bit++) {
        wnaf[bit] = 0;
    }

    s = *a;
    if (secp256k1_scalar_get_bits_limb32(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);
        sign = -1;
    }

    bit = 0;
    while (bit < len) {
        int now;
        int word;
        if (secp256k1_scalar_get_bits_limb32(&s, bit, 1) == (unsigned int)carry) {
            bit++;
            continue;
        }

        now = w;
        if (now > len - bit) {
            now = len - bit;
        }

        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit;

        bit += now;
    }
#ifdef VERIFY
    {
        int verify_bit = bit;

        VERIFY_CHECK(carry == 0);

        while (verify_bit < 256) {
            VERIFY_CHECK(secp256k1_scalar_get_bits_limb32(&s, verify_bit, 1) == 0);
            verify_bit++;
        }
    }
#endif
    return last_set_bit + 1;
}
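/* Worked example of the conversion above (illustrative only; real callers use
 * w = WINDOW_A or WINDOW_G): converting 181 = 0b10110101 with w = 4 yields
 *     wnaf = {5, 0, 0, 0, -5, 0, 0, 0, 1, 0, ...},  return value 9,
 * since 181 = 5*2^0 - 5*2^4 + 1*2^8. Window 1 reads 0b1011 = 11, which
 * exceeds 2^(w-1) - 1 = 7, so a carry is emitted and the digit becomes
 * 11 - 16 = -5; the carry is absorbed by the next window. Every non-zero
 * digit is odd and within [-7, 7], and non-zero digits are separated by at
 * least w-1 = 3 zeroes, as guaranteed.
 */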
struct secp256k1_strauss_point_state {
    int wnaf_na_1[129];
    int wnaf_na_lam[129];
    int bits_na_1;
    int bits_na_lam;
};

struct secp256k1_strauss_state {
    /* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */
    secp256k1_fe* aux;
    secp256k1_ge* pre_a;
    struct secp256k1_strauss_point_state* ps;
};

static void secp256k1_ecmult_strauss_wnaf(const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_ge tmpa;
    secp256k1_fe Z;
    /* Split G factors. */
    secp256k1_scalar ng_1, ng_128;
    int wnaf_ng_1[129];
    int bits_ng_1 = 0;
    int wnaf_ng_128[129];
    int bits_ng_128 = 0;
    int i;
    int bits = 0;
    size_t np;
    size_t no = 0;

    secp256k1_fe_set_int(&Z, 1);
    for (np = 0; np < num; ++np) {
        secp256k1_gej tmp;
        secp256k1_scalar na_1, na_lam;
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
            continue;
        }
        /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na[np]);

        /* build wnaf representation for na_1 and na_lam. */
        state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
        VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
        VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }

        /* Calculate odd multiples of a.
         * All multiples are brought to the same Z 'denominator', which is stored
         * in Z. Due to secp256k1's isomorphism we can do all operations pretending
         * that the Z coordinate was 1, use affine addition formulae, and correct
         * the Z coordinate of the result once at the end.
         * The exception is the precomputed G table points, which are actually
         * affine. Compared to the base used for other points, they have a Z ratio
         * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
         * isomorphism to efficiently add with a known Z inverse.
         */
        tmp = a[np];
        if (no) {
            secp256k1_gej_rescale(&tmp, &Z);
        }
        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp);
        if (no) secp256k1_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));

        ++no;
    }

    /* Bring them to the same Z denominator. */
    if (no) {
        secp256k1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux);
    }

    for (np = 0; np < no; ++np) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &secp256k1_const_beta);
        }
    }

    if (ng) {
        /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */
        secp256k1_scalar_split_128(&ng_1, &ng_128, ng);

        /* Build wnaf representation for ng_1 and ng_128 */
        bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
        bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }

    secp256k1_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        int n;
        secp256k1_gej_double_var(r, r, NULL);
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                secp256k1_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                secp256k1_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            secp256k1_ecmult_table_get_ge_storage(&tmpa, secp256k1_pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            secp256k1_ecmult_table_get_ge_storage(&tmpa, secp256k1_pre_g_128, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
    }

    if (!r->infinity) {
        secp256k1_fe_mul(&r->z, &r->z, &Z);
    }
}

static void secp256k1_ecmult(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    struct secp256k1_strauss_point_state ps[1];
    struct secp256k1_strauss_state state;

    state.aux = aux;
    state.pre_a = pre_a;
    state.ps = ps;
    secp256k1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
}
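/* Usage sketch for secp256k1_ecmult (a hypothetical caller; setup elided):
 * computes r = na*A + ng*G for a Jacobian point A and scalars na, ng.
 *
 *     secp256k1_gej r, a;
 *     secp256k1_scalar na, ng;
 *     ... initialize a, na, ng ...
 *     secp256k1_ecmult(&r, &a, &na, &ng);
 *
 * Passing ng == NULL skips the ng*G term entirely.
 */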
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
    static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
    return n_points*point_size;
}

static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    secp256k1_gej* points;
    secp256k1_scalar* scalars;
    struct secp256k1_strauss_state state;
    size_t i;
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these
     * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS
     * constant and strauss_scratch_size accordingly. */
    points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
    state.aux = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));

    if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    for (i = 0; i < n_points; i++) {
        secp256k1_ge point;
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        secp256k1_gej_set_ge(&points[i], &point);
    }
    secp256k1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}
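/* Illustrative arithmetic for the sizing above (a sketch; the concrete byte
 * counts depend on the build): with WINDOW_A = 5, each point needs
 * ECMULT_TABLE_SIZE(5) = 1 << (5-2) = 8 precomputed odd multiples, so
 * secp256k1_strauss_scratch_size(1) counts 8 ge plus 8 fe per point, plus one
 * point state, one gej and one scalar. secp256k1_strauss_max_points then
 * simply divides the largest possible scratch allocation by this per-point
 * cost; e.g. a scratch space whose usable size is 100 times the per-point
 * cost admits batches of at most 100 points.
 */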
/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{w*i} * wnaf[i], i=0..WNAF_SIZE(w)-1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w)
 *  - the number of words set is always WNAF_SIZE(w)
 *  - the returned skew is 0 or 1
 */
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
    int skew = 0;
    int pos;
    int max_pos;
    int last_w;
    const secp256k1_scalar *work = s;

    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            wnaf[pos] = 0;
        }
        return 0;
    }

    if (secp256k1_scalar_is_even(s)) {
        skew = 1;
    }

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
    /* Compute last window size. Relevant when window size doesn't divide the
     * number of bits in the scalar */
    last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;

    /* Store the position of the first nonzero word in max_pos to allow
     * skipping leading zeros when calculating the wnaf. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if (val != 0) {
            break;
        }
        wnaf[pos] = 0;
    }
    max_pos = pos;
    pos = 1;

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        } else {
            wnaf[pos] = val;
        }
        /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
         * is strictly negative or strictly positive respectively. Only change
         * coefficients at previous positions because above code assumes that
         * wnaf[pos - 1] is odd.
         */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            wnaf[pos - 1] = 0;
        }
        ++pos;
    }

    return skew;
}

struct secp256k1_pippenger_point_state {
    int skew_na;
    size_t input_pos;
};

struct secp256k1_pippenger_state {
    int *wnaf_na;
    struct secp256k1_pippenger_point_state* ps;
};
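/* Worked example of the fixed-length conversion above (illustrative, w = 4):
 * the even scalar 44 gets skew = 1 and is processed as follows:
 *     wnaf[0] = (44 & 0xf) + skew = 13
 *     window 1 holds (44 >> 4) = 2, which is even, so the code borrows:
 *     wnaf[0] -= 1 << 4  ->  -3,   wnaf[1] = 2 + 1 = 3
 * giving 44 = (-3)*2^0 + 3*2^4 - skew. All digits end up odd, as the bucket
 * indexing in pippenger_wnaf below requires.
 */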
/*
 * pippenger_wnaf computes the result of a multi-point multiplication as
 * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
 * for every i < n_wnaf, first each point is added to a "bucket" corresponding
 * to the point's wnaf[i]. Second, the buckets are added together such that
 * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
 */
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);
    size_t np;
    size_t no = 0;
    int i;
    int j;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        no++;
    }
    secp256k1_gej_set_infinity(r);

    if (no == 0) {
        return 1;
    }

    for (i = n_wnaf - 1; i >= 0; i--) {
        secp256k1_gej running_sum;

        for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
            secp256k1_gej_set_infinity(&buckets[j]);
        }

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            struct secp256k1_pippenger_point_state point_state = state->ps[np];
            secp256k1_ge tmp;
            int idx;

            if (i == 0) {
                /* correct for wnaf skew */
                int skew = point_state.skew_na;
                if (skew) {
                    secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                    secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                }
            }
            if (n > 0) {
                idx = (n - 1)/2;
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                idx = -(n + 1)/2;
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
            }
        }

        for(j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);
        }

        secp256k1_gej_set_infinity(&running_sum);
        /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
         * = bucket[0] +   bucket[1] +   bucket[2] +   bucket[3] + ...
         *   + 2 *       (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
         * using an intermediate running sum:
         * running_sum = bucket[0] + bucket[1] + bucket[2] + ...
         *
         * The doubling is done implicitly by deferring the final window doubling (of 'r').
         */
        for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        }

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}

/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int secp256k1_pippenger_bucket_window(size_t n) {
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
}
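/* Worked trace of the bucket accumulation in secp256k1_ecmult_pippenger_wnaf
 * above (illustrative, bucket_window = 2, hence 1 << 2 = 4 buckets b0..b3;
 * r already holds the doubled previous windows). The inner loop computes:
 *     j=3: running =           b3,   r += running
 *     j=2: running =      b2 + b3,   r += running
 *     j=1: running = b1 + b2 + b3,   r += running
 * so at loop exit r has gained b1 + 2*b2 + 3*b3. Then running += b0, r is
 * doubled once, and running is added, for a net contribution of
 *     2*(b1 + 2*b2 + 3*b3) + (b0 + b1 + b2 + b3) = b0 + 3*b1 + 5*b2 + 7*b3,
 * exactly the odd-multiple weighting the algorithm requires.
 */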
/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
    }
    return 0;
}


SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
    secp256k1_scalar tmp = *s1;
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
}

/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
    size_t entries = 2*n_points + 2;
    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
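/* Illustrative use of secp256k1_ecmult_endo_split (a hypothetical caller):
 * one 256-bit term s*P is replaced by two terms with ~128-bit scalars,
 *     s*P = s1*P1 + s2*P2,
 * where s1 + lambda*s2 = s (mod n), P1 = P and P2 = lambda*P (computed cheaply
 * via secp256k1_ge_mul_lambda). The negations keep s1 and s2 in the low half
 * of the scalar range. The caller loads s into s1 and P into p1 first:
 *
 *     secp256k1_scalar s1, s2;   s1 = s;
 *     secp256k1_ge p1, p2;       p1 = p;
 *     secp256k1_ecmult_endo_split(&s1, &s2, &p1, &p2);
 */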
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
    /* Use 2(n+1) entries with the endomorphism when calculating batch sizes.
     * The reason for the +1 is that we add the G scalar to the list of other
     * scalars. */
    size_t entries = 2*n_points + 2;
    secp256k1_ge *points;
    secp256k1_scalar *scalars;
    secp256k1_gej *buckets;
    struct secp256k1_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int bucket_window;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }
    bucket_window = secp256k1_pippenger_bucket_window(n_points);

    /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If
     * these allocations change, make sure to update the
     * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size
     * accordingly.
     */
    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }
    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        idx++;
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        idx++;
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
        point_idx++;
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
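/* Illustrative layout of the (scalar, point) arrays filled above (a sketch,
 * assuming inp_g_sc != NULL and n_points = 2, so entries = 2*2 + 2 = 6):
 *     [0],[1]  endo-split of (inp_g_sc, G)
 *     [2],[3]  endo-split of the first callback output
 *     [4],[5]  endo-split of the second callback output
 * which is why batch sizing uses 2(n+1) entries per batch.
 */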
/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space. Any smaller number of points may safely be used with
 * the same scratch space as well.
 */
static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

        entry_size = 2*entry_size;
        space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point. Does not
 * require a scratch space */
static int secp256k1_ecmult_multi_simple_var(secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    secp256k1_gej tmpj;

    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(r, &tmpj, &secp256k1_scalar_zero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        secp256k1_ge point;
        secp256k1_gej pointj;
        secp256k1_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(&tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

/* Compute the number of batches and the batch size given the maximum batch size and the
 * total number of points */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
    *n_batches = CEIL_DIV(n, max_n_batch_points);
    *n_batch_points = CEIL_DIV(n, *n_batches);
    return 1;
}
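/* Worked example for secp256k1_ecmult_multi_batch_size_helper (illustrative):
 * n = 101 points with max_n_batch_points = 100 gives
 *     n_batches      = ceil(101/100) = 2
 *     n_batch_points = ceil(101/2)   = 51
 * so the two batches process 51 and 50 points. Dividing n by n_batches evens
 * out the batch sizes rather than leaving a tiny final batch of 1 point.
 */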
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t n_batches;
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        secp256k1_ecmult(r, r, &secp256k1_scalar_zero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }

    /* Compute the batch size for Pippenger's algorithm given the scratch space. If
     * the batch size is above a threshold, use Pippenger's algorithm; otherwise use
     * Strauss'. As a first step, check whether there is enough space for Pippenger's
     * algorithm (which requires less space than Strauss') and, if not, fall back to
     * the simple algorithm. */
    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
    }
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        secp256k1_gej tmp;
        if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}

#endif /* SECP256K1_ECMULT_IMPL_H */