github.com/luckypickle/go-ethereum-vet@v1.14.2/crypto/secp256k1/libsecp256k1/src/group_impl.h (about) 1 /********************************************************************** 2 * Copyright (c) 2013, 2014 Pieter Wuille * 3 * Distributed under the MIT software license, see the accompanying * 4 * file COPYING or http://www.opensource.org/licenses/mit-license.php.* 5 **********************************************************************/ 6 7 #ifndef _SECP256K1_GROUP_IMPL_H_ 8 #define _SECP256K1_GROUP_IMPL_H_ 9 10 #include "num.h" 11 #include "field.h" 12 #include "group.h" 13 14 /* These points can be generated in sage as follows: 15 * 16 * 0. Setup a worksheet with the following parameters. 17 * b = 4 # whatever VET_CURVE_B will be set to 18 * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) 19 * C = EllipticCurve ([F (0), F (b)]) 20 * 21 * 1. Determine all the small orders available to you. (If there are 22 * no satisfactory ones, go back and change b.) 23 * print C.order().factor(limit=1000) 24 * 25 * 2. Choose an order as one of the prime factors listed in the above step. 26 * (You can also multiply some to get a composite order, though the 27 * tests will crash trying to invert scalars during signing.) We take a 28 * random point and scale it to drop its order to the desired value. 29 * There is some probability this won't work; just try again. 30 * order = 199 31 * P = C.random_point() 32 * P = (int(P.order()) / int(order)) * P 33 * assert(P.order() == order) 34 * 35 * 3. Print the values. You'll need to use a vim macro or something to 36 * split the hex output into 4-byte chunks. 37 * print "%x %x" % P.xy() 38 */ 39 #if defined(EXHAUSTIVE_TEST_ORDER) 40 # if EXHAUSTIVE_TEST_ORDER == 199 41 const vet_secp256k1_ge vet_secp256k1_ge_const_g = SECP256K1_GE_CONST( 42 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, 43 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, 44 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, 45 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED 46 ); 47 48 const int VET_CURVE_B = 4; 49 # elif EXHAUSTIVE_TEST_ORDER == 13 50 const vet_secp256k1_ge vet_secp256k1_ge_const_g = SECP256K1_GE_CONST( 51 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, 52 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, 53 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, 54 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac 55 ); 56 const int VET_CURVE_B = 2; 57 # else 58 # error No known generator for the specified exhaustive test group order. 59 # endif 60 #else 61 /** Generator for secp256k1, value 'g' defined in 62 * "Standards for Efficient Cryptography" (SEC2) 2.7.1. 63 */ 64 static const vet_secp256k1_ge vet_secp256k1_ge_const_g = SECP256K1_GE_CONST( 65 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, 66 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, 67 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, 68 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL 69 ); 70 71 const int VET_CURVE_B = 7; 72 #endif 73 74 static void vet_secp256k1_ge_set_gej_zinv(vet_secp256k1_ge *r, const vet_secp256k1_gej *a, const vet_secp256k1_fe *zi) { 75 vet_secp256k1_fe zi2; 76 vet_secp256k1_fe zi3; 77 vet_secp256k1_fe_sqr(&zi2, zi); 78 vet_secp256k1_fe_mul(&zi3, &zi2, zi); 79 vet_secp256k1_fe_mul(&r->x, &a->x, &zi2); 80 vet_secp256k1_fe_mul(&r->y, &a->y, &zi3); 81 r->infinity = a->infinity; 82 } 83 84 static void vet_secp256k1_ge_set_xy(vet_secp256k1_ge *r, const vet_secp256k1_fe *x, const vet_secp256k1_fe *y) { 85 r->infinity = 0; 86 r->x = *x; 87 r->y = *y; 88 } 89 90 static int vet_secp256k1_ge_is_infinity(const vet_secp256k1_ge *a) { 91 return a->infinity; 92 } 93 94 static void vet_secp256k1_ge_neg(vet_secp256k1_ge *r, const vet_secp256k1_ge *a) { 95 *r = *a; 96 vet_secp256k1_fe_normalize_weak(&r->y); 97 vet_secp256k1_fe_negate(&r->y, &r->y, 1); 98 } 99 100 static void vet_secp256k1_ge_set_gej(vet_secp256k1_ge *r, vet_secp256k1_gej *a) { 101 vet_secp256k1_fe z2, z3; 102 r->infinity = a->infinity; 103 vet_secp256k1_fe_inv(&a->z, &a->z); 104 vet_secp256k1_fe_sqr(&z2, &a->z); 105 vet_secp256k1_fe_mul(&z3, &a->z, &z2); 106 vet_secp256k1_fe_mul(&a->x, &a->x, &z2); 107 vet_secp256k1_fe_mul(&a->y, &a->y, &z3); 108 vet_secp256k1_fe_set_int(&a->z, 1); 109 r->x = a->x; 110 r->y = a->y; 111 } 112 113 static void vet_secp256k1_ge_set_gej_var(vet_secp256k1_ge *r, vet_secp256k1_gej *a) { 114 vet_secp256k1_fe z2, z3; 115 r->infinity = a->infinity; 116 if (a->infinity) { 117 return; 118 } 119 vet_secp256k1_fe_inv_var(&a->z, &a->z); 120 vet_secp256k1_fe_sqr(&z2, &a->z); 121 vet_secp256k1_fe_mul(&z3, &a->z, &z2); 122 vet_secp256k1_fe_mul(&a->x, &a->x, &z2); 123 vet_secp256k1_fe_mul(&a->y, &a->y, &z3); 124 vet_secp256k1_fe_set_int(&a->z, 1); 125 r->x = a->x; 126 r->y = a->y; 127 } 128 129 static void vet_secp256k1_ge_set_all_gej_var(vet_secp256k1_ge *r, const vet_secp256k1_gej *a, size_t len, const vet_secp256k1_callback *cb) { 130 vet_secp256k1_fe *az; 131 vet_secp256k1_fe *azi; 132 size_t i; 133 size_t count = 0; 134 az = (vet_secp256k1_fe *)checked_malloc(cb, sizeof(vet_secp256k1_fe) * len); 135 for (i = 0; i < len; i++) { 136 if (!a[i].infinity) { 137 az[count++] = a[i].z; 138 } 139 } 140 141 azi = (vet_secp256k1_fe *)checked_malloc(cb, sizeof(vet_secp256k1_fe) * count); 142 vet_secp256k1_fe_inv_all_var(azi, az, count); 143 free(az); 144 145 count = 0; 146 for (i = 0; i < len; i++) { 147 r[i].infinity = a[i].infinity; 148 if (!a[i].infinity) { 149 vet_secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]); 150 } 151 } 152 free(azi); 153 } 154 155 static void vet_secp256k1_ge_set_table_gej_var(vet_secp256k1_ge *r, const vet_secp256k1_gej *a, const vet_secp256k1_fe *zr, size_t len) { 156 size_t i = len - 1; 157 vet_secp256k1_fe zi; 158 159 if (len > 0) { 160 /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */ 161 vet_secp256k1_fe_inv(&zi, &a[i].z); 162 vet_secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); 163 164 /* Work out way backwards, using the z-ratios to scale the x/y values. */ 165 while (i > 0) { 166 vet_secp256k1_fe_mul(&zi, &zi, &zr[i]); 167 i--; 168 vet_secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); 169 } 170 } 171 } 172 173 static void vet_secp256k1_ge_globalz_set_table_gej(size_t len, vet_secp256k1_ge *r, vet_secp256k1_fe *globalz, const vet_secp256k1_gej *a, const vet_secp256k1_fe *zr) { 174 size_t i = len - 1; 175 vet_secp256k1_fe zs; 176 177 if (len > 0) { 178 /* The z of the final point gives us the "global Z" for the table. */ 179 r[i].x = a[i].x; 180 r[i].y = a[i].y; 181 *globalz = a[i].z; 182 r[i].infinity = 0; 183 zs = zr[i]; 184 185 /* Work our way backwards, using the z-ratios to scale the x/y values. */ 186 while (i > 0) { 187 if (i != len - 1) { 188 vet_secp256k1_fe_mul(&zs, &zs, &zr[i]); 189 } 190 i--; 191 vet_secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs); 192 } 193 } 194 } 195 196 static void vet_secp256k1_gej_set_infinity(vet_secp256k1_gej *r) { 197 r->infinity = 1; 198 vet_secp256k1_fe_clear(&r->x); 199 vet_secp256k1_fe_clear(&r->y); 200 vet_secp256k1_fe_clear(&r->z); 201 } 202 203 static void vet_secp256k1_gej_clear(vet_secp256k1_gej *r) { 204 r->infinity = 0; 205 vet_secp256k1_fe_clear(&r->x); 206 vet_secp256k1_fe_clear(&r->y); 207 vet_secp256k1_fe_clear(&r->z); 208 } 209 210 static void vet_secp256k1_ge_clear(vet_secp256k1_ge *r) { 211 r->infinity = 0; 212 vet_secp256k1_fe_clear(&r->x); 213 vet_secp256k1_fe_clear(&r->y); 214 } 215 216 static int vet_secp256k1_ge_set_xquad(vet_secp256k1_ge *r, const vet_secp256k1_fe *x) { 217 vet_secp256k1_fe x2, x3, c; 218 r->x = *x; 219 vet_secp256k1_fe_sqr(&x2, x); 220 vet_secp256k1_fe_mul(&x3, x, &x2); 221 r->infinity = 0; 222 vet_secp256k1_fe_set_int(&c, VET_CURVE_B); 223 vet_secp256k1_fe_add(&c, &x3); 224 return vet_secp256k1_fe_sqrt(&r->y, &c); 225 } 226 227 static int vet_secp256k1_ge_set_xo_var(vet_secp256k1_ge *r, const vet_secp256k1_fe *x, int odd) { 228 if (!vet_secp256k1_ge_set_xquad(r, x)) { 229 return 0; 230 } 231 vet_secp256k1_fe_normalize_var(&r->y); 232 if (vet_secp256k1_fe_is_odd(&r->y) != odd) { 233 vet_secp256k1_fe_negate(&r->y, &r->y, 1); 234 } 235 return 1; 236 237 } 238 239 static void vet_secp256k1_gej_set_ge(vet_secp256k1_gej *r, const vet_secp256k1_ge *a) { 240 r->infinity = a->infinity; 241 r->x = a->x; 242 r->y = a->y; 243 vet_secp256k1_fe_set_int(&r->z, 1); 244 } 245 246 static int vet_secp256k1_gej_eq_x_var(const vet_secp256k1_fe *x, const vet_secp256k1_gej *a) { 247 vet_secp256k1_fe r, r2; 248 VERIFY_CHECK(!a->infinity); 249 vet_secp256k1_fe_sqr(&r, &a->z); vet_secp256k1_fe_mul(&r, &r, x); 250 r2 = a->x; vet_secp256k1_fe_normalize_weak(&r2); 251 return vet_secp256k1_fe_equal_var(&r, &r2); 252 } 253 254 static void vet_secp256k1_gej_neg(vet_secp256k1_gej *r, const vet_secp256k1_gej *a) { 255 r->infinity = a->infinity; 256 r->x = a->x; 257 r->y = a->y; 258 r->z = a->z; 259 vet_secp256k1_fe_normalize_weak(&r->y); 260 vet_secp256k1_fe_negate(&r->y, &r->y, 1); 261 } 262 263 static int vet_secp256k1_gej_is_infinity(const vet_secp256k1_gej *a) { 264 return a->infinity; 265 } 266 267 static int vet_secp256k1_gej_is_valid_var(const vet_secp256k1_gej *a) { 268 vet_secp256k1_fe y2, x3, z2, z6; 269 if (a->infinity) { 270 return 0; 271 } 272 /** y^2 = x^3 + 7 273 * (Y/Z^3)^2 = (X/Z^2)^3 + 7 274 * Y^2 / Z^6 = X^3 / Z^6 + 7 275 * Y^2 = X^3 + 7*Z^6 276 */ 277 vet_secp256k1_fe_sqr(&y2, &a->y); 278 vet_secp256k1_fe_sqr(&x3, &a->x); vet_secp256k1_fe_mul(&x3, &x3, &a->x); 279 vet_secp256k1_fe_sqr(&z2, &a->z); 280 vet_secp256k1_fe_sqr(&z6, &z2); vet_secp256k1_fe_mul(&z6, &z6, &z2); 281 vet_secp256k1_fe_mul_int(&z6, VET_CURVE_B); 282 vet_secp256k1_fe_add(&x3, &z6); 283 vet_secp256k1_fe_normalize_weak(&x3); 284 return vet_secp256k1_fe_equal_var(&y2, &x3); 285 } 286 287 static int vet_secp256k1_ge_is_valid_var(const vet_secp256k1_ge *a) { 288 vet_secp256k1_fe y2, x3, c; 289 if (a->infinity) { 290 return 0; 291 } 292 /* y^2 = x^3 + 7 */ 293 vet_secp256k1_fe_sqr(&y2, &a->y); 294 vet_secp256k1_fe_sqr(&x3, &a->x); vet_secp256k1_fe_mul(&x3, &x3, &a->x); 295 vet_secp256k1_fe_set_int(&c, VET_CURVE_B); 296 vet_secp256k1_fe_add(&x3, &c); 297 vet_secp256k1_fe_normalize_weak(&x3); 298 return vet_secp256k1_fe_equal_var(&y2, &x3); 299 } 300 301 static void vet_secp256k1_gej_double_var(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, vet_secp256k1_fe *rzr) { 302 /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. 303 * 304 * Note that there is an implementation described at 305 * https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l 306 * which trades a multiply for a square, but in practice this is actually slower, 307 * mainly because it requires more normalizations. 308 */ 309 vet_secp256k1_fe t1,t2,t3,t4; 310 /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, 311 * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have 312 * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. 313 * 314 * Having said this, if this function receives a point on a sextic twist, e.g. by 315 * a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6, 316 * since -6 does have a cube root mod p. For this point, this function will not set 317 * the infinity flag even though the point doubles to infinity, and the result 318 * point will be gibberish (z = 0 but infinity = 0). 319 */ 320 r->infinity = a->infinity; 321 if (r->infinity) { 322 if (rzr != NULL) { 323 vet_secp256k1_fe_set_int(rzr, 1); 324 } 325 return; 326 } 327 328 if (rzr != NULL) { 329 *rzr = a->y; 330 vet_secp256k1_fe_normalize_weak(rzr); 331 vet_secp256k1_fe_mul_int(rzr, 2); 332 } 333 334 vet_secp256k1_fe_mul(&r->z, &a->z, &a->y); 335 vet_secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ 336 vet_secp256k1_fe_sqr(&t1, &a->x); 337 vet_secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ 338 vet_secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ 339 vet_secp256k1_fe_sqr(&t3, &a->y); 340 vet_secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ 341 vet_secp256k1_fe_sqr(&t4, &t3); 342 vet_secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ 343 vet_secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ 344 r->x = t3; 345 vet_secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ 346 vet_secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ 347 vet_secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ 348 vet_secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ 349 vet_secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ 350 vet_secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ 351 vet_secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ 352 vet_secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ 353 vet_secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ 354 } 355 356 static SECP256K1_INLINE void vet_secp256k1_gej_double_nonzero(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, vet_secp256k1_fe *rzr) { 357 VERIFY_CHECK(!vet_secp256k1_gej_is_infinity(a)); 358 vet_secp256k1_gej_double_var(r, a, rzr); 359 } 360 361 static void vet_secp256k1_gej_add_var(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, const vet_secp256k1_gej *b, vet_secp256k1_fe *rzr) { 362 /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ 363 vet_secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; 364 365 if (a->infinity) { 366 VERIFY_CHECK(rzr == NULL); 367 *r = *b; 368 return; 369 } 370 371 if (b->infinity) { 372 if (rzr != NULL) { 373 vet_secp256k1_fe_set_int(rzr, 1); 374 } 375 *r = *a; 376 return; 377 } 378 379 r->infinity = 0; 380 vet_secp256k1_fe_sqr(&z22, &b->z); 381 vet_secp256k1_fe_sqr(&z12, &a->z); 382 vet_secp256k1_fe_mul(&u1, &a->x, &z22); 383 vet_secp256k1_fe_mul(&u2, &b->x, &z12); 384 vet_secp256k1_fe_mul(&s1, &a->y, &z22); vet_secp256k1_fe_mul(&s1, &s1, &b->z); 385 vet_secp256k1_fe_mul(&s2, &b->y, &z12); vet_secp256k1_fe_mul(&s2, &s2, &a->z); 386 vet_secp256k1_fe_negate(&h, &u1, 1); vet_secp256k1_fe_add(&h, &u2); 387 vet_secp256k1_fe_negate(&i, &s1, 1); vet_secp256k1_fe_add(&i, &s2); 388 if (vet_secp256k1_fe_normalizes_to_zero_var(&h)) { 389 if (vet_secp256k1_fe_normalizes_to_zero_var(&i)) { 390 vet_secp256k1_gej_double_var(r, a, rzr); 391 } else { 392 if (rzr != NULL) { 393 vet_secp256k1_fe_set_int(rzr, 0); 394 } 395 r->infinity = 1; 396 } 397 return; 398 } 399 vet_secp256k1_fe_sqr(&i2, &i); 400 vet_secp256k1_fe_sqr(&h2, &h); 401 vet_secp256k1_fe_mul(&h3, &h, &h2); 402 vet_secp256k1_fe_mul(&h, &h, &b->z); 403 if (rzr != NULL) { 404 *rzr = h; 405 } 406 vet_secp256k1_fe_mul(&r->z, &a->z, &h); 407 vet_secp256k1_fe_mul(&t, &u1, &h2); 408 r->x = t; vet_secp256k1_fe_mul_int(&r->x, 2); vet_secp256k1_fe_add(&r->x, &h3); vet_secp256k1_fe_negate(&r->x, &r->x, 3); vet_secp256k1_fe_add(&r->x, &i2); 409 vet_secp256k1_fe_negate(&r->y, &r->x, 5); vet_secp256k1_fe_add(&r->y, &t); vet_secp256k1_fe_mul(&r->y, &r->y, &i); 410 vet_secp256k1_fe_mul(&h3, &h3, &s1); vet_secp256k1_fe_negate(&h3, &h3, 1); 411 vet_secp256k1_fe_add(&r->y, &h3); 412 } 413 414 static void vet_secp256k1_gej_add_ge_var(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, const vet_secp256k1_ge *b, vet_secp256k1_fe *rzr) { 415 /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ 416 vet_secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; 417 if (a->infinity) { 418 VERIFY_CHECK(rzr == NULL); 419 vet_secp256k1_gej_set_ge(r, b); 420 return; 421 } 422 if (b->infinity) { 423 if (rzr != NULL) { 424 vet_secp256k1_fe_set_int(rzr, 1); 425 } 426 *r = *a; 427 return; 428 } 429 r->infinity = 0; 430 431 vet_secp256k1_fe_sqr(&z12, &a->z); 432 u1 = a->x; vet_secp256k1_fe_normalize_weak(&u1); 433 vet_secp256k1_fe_mul(&u2, &b->x, &z12); 434 s1 = a->y; vet_secp256k1_fe_normalize_weak(&s1); 435 vet_secp256k1_fe_mul(&s2, &b->y, &z12); vet_secp256k1_fe_mul(&s2, &s2, &a->z); 436 vet_secp256k1_fe_negate(&h, &u1, 1); vet_secp256k1_fe_add(&h, &u2); 437 vet_secp256k1_fe_negate(&i, &s1, 1); vet_secp256k1_fe_add(&i, &s2); 438 if (vet_secp256k1_fe_normalizes_to_zero_var(&h)) { 439 if (vet_secp256k1_fe_normalizes_to_zero_var(&i)) { 440 vet_secp256k1_gej_double_var(r, a, rzr); 441 } else { 442 if (rzr != NULL) { 443 vet_secp256k1_fe_set_int(rzr, 0); 444 } 445 r->infinity = 1; 446 } 447 return; 448 } 449 vet_secp256k1_fe_sqr(&i2, &i); 450 vet_secp256k1_fe_sqr(&h2, &h); 451 vet_secp256k1_fe_mul(&h3, &h, &h2); 452 if (rzr != NULL) { 453 *rzr = h; 454 } 455 vet_secp256k1_fe_mul(&r->z, &a->z, &h); 456 vet_secp256k1_fe_mul(&t, &u1, &h2); 457 r->x = t; vet_secp256k1_fe_mul_int(&r->x, 2); vet_secp256k1_fe_add(&r->x, &h3); vet_secp256k1_fe_negate(&r->x, &r->x, 3); vet_secp256k1_fe_add(&r->x, &i2); 458 vet_secp256k1_fe_negate(&r->y, &r->x, 5); vet_secp256k1_fe_add(&r->y, &t); vet_secp256k1_fe_mul(&r->y, &r->y, &i); 459 vet_secp256k1_fe_mul(&h3, &h3, &s1); vet_secp256k1_fe_negate(&h3, &h3, 1); 460 vet_secp256k1_fe_add(&r->y, &h3); 461 } 462 463 static void vet_secp256k1_gej_add_zinv_var(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, const vet_secp256k1_ge *b, const vet_secp256k1_fe *bzinv) { 464 /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ 465 vet_secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; 466 467 if (b->infinity) { 468 *r = *a; 469 return; 470 } 471 if (a->infinity) { 472 vet_secp256k1_fe bzinv2, bzinv3; 473 r->infinity = b->infinity; 474 vet_secp256k1_fe_sqr(&bzinv2, bzinv); 475 vet_secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv); 476 vet_secp256k1_fe_mul(&r->x, &b->x, &bzinv2); 477 vet_secp256k1_fe_mul(&r->y, &b->y, &bzinv3); 478 vet_secp256k1_fe_set_int(&r->z, 1); 479 return; 480 } 481 r->infinity = 0; 482 483 /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to 484 * secp256k1's isomorphism we can multiply the Z coordinates on both sides 485 * by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1). 486 * This means that (rx,ry,rz) can be calculated as 487 * (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz. 488 * The variable az below holds the modified Z coordinate for a, which is used 489 * for the computation of rx and ry, but not for rz. 490 */ 491 vet_secp256k1_fe_mul(&az, &a->z, bzinv); 492 493 vet_secp256k1_fe_sqr(&z12, &az); 494 u1 = a->x; vet_secp256k1_fe_normalize_weak(&u1); 495 vet_secp256k1_fe_mul(&u2, &b->x, &z12); 496 s1 = a->y; vet_secp256k1_fe_normalize_weak(&s1); 497 vet_secp256k1_fe_mul(&s2, &b->y, &z12); vet_secp256k1_fe_mul(&s2, &s2, &az); 498 vet_secp256k1_fe_negate(&h, &u1, 1); vet_secp256k1_fe_add(&h, &u2); 499 vet_secp256k1_fe_negate(&i, &s1, 1); vet_secp256k1_fe_add(&i, &s2); 500 if (vet_secp256k1_fe_normalizes_to_zero_var(&h)) { 501 if (vet_secp256k1_fe_normalizes_to_zero_var(&i)) { 502 vet_secp256k1_gej_double_var(r, a, NULL); 503 } else { 504 r->infinity = 1; 505 } 506 return; 507 } 508 vet_secp256k1_fe_sqr(&i2, &i); 509 vet_secp256k1_fe_sqr(&h2, &h); 510 vet_secp256k1_fe_mul(&h3, &h, &h2); 511 r->z = a->z; vet_secp256k1_fe_mul(&r->z, &r->z, &h); 512 vet_secp256k1_fe_mul(&t, &u1, &h2); 513 r->x = t; vet_secp256k1_fe_mul_int(&r->x, 2); vet_secp256k1_fe_add(&r->x, &h3); vet_secp256k1_fe_negate(&r->x, &r->x, 3); vet_secp256k1_fe_add(&r->x, &i2); 514 vet_secp256k1_fe_negate(&r->y, &r->x, 5); vet_secp256k1_fe_add(&r->y, &t); vet_secp256k1_fe_mul(&r->y, &r->y, &i); 515 vet_secp256k1_fe_mul(&h3, &h3, &s1); vet_secp256k1_fe_negate(&h3, &h3, 1); 516 vet_secp256k1_fe_add(&r->y, &h3); 517 } 518 519 520 static void vet_secp256k1_gej_add_ge(vet_secp256k1_gej *r, const vet_secp256k1_gej *a, const vet_secp256k1_ge *b) { 521 /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ 522 static const vet_secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); 523 vet_secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; 524 vet_secp256k1_fe m_alt, rr_alt; 525 int infinity, degenerate; 526 VERIFY_CHECK(!b->infinity); 527 VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); 528 529 /** In: 530 * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. 531 * In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. 532 * we find as solution for a unified addition/doubling formula: 533 * lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. 534 * x3 = lambda^2 - (x1 + x2) 535 * 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). 536 * 537 * Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: 538 * U1 = X1*Z2^2, U2 = X2*Z1^2 539 * S1 = Y1*Z2^3, S2 = Y2*Z1^3 540 * Z = Z1*Z2 541 * T = U1+U2 542 * M = S1+S2 543 * Q = T*M^2 544 * R = T^2-U1*U2 545 * X3 = 4*(R^2-Q) 546 * Y3 = 4*(R*(3*Q-2*R^2)-M^4) 547 * Z3 = 2*M*Z 548 * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) 549 * 550 * This formula has the benefit of being the same for both addition 551 * of distinct points and doubling. However, it breaks down in the 552 * case that either point is infinity, or that y1 = -y2. We handle 553 * these cases in the following ways: 554 * 555 * - If b is infinity we simply bail by means of a VERIFY_CHECK. 556 * 557 * - If a is infinity, we detect this, and at the end of the 558 * computation replace the result (which will be meaningless, 559 * but we compute to be constant-time) with b.x : b.y : 1. 560 * 561 * - If a = -b, we have y1 = -y2, which is a degenerate case. 562 * But here the answer is infinity, so we simply set the 563 * infinity flag of the result, overriding the computed values 564 * without even needing to cmov. 565 * 566 * - If y1 = -y2 but x1 != x2, which does occur thanks to certain 567 * properties of our curve (specifically, 1 has nontrivial cube 568 * roots in our field, and the curve equation has no x coefficient) 569 * then the answer is not infinity but also not given by the above 570 * equation. In this case, we cmov in place an alternate expression 571 * for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these 572 * expressions for lambda are defined, they are equal, and can be 573 * obtained from each other by multiplication by (y1 + y2)/(y1 + y2) 574 * then substitution of x^3 + 7 for y^2 (using the curve equation). 575 * For all pairs of nonzero points (a, b) at least one is defined, 576 * so this covers everything. 577 */ 578 579 vet_secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ 580 u1 = a->x; vet_secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ 581 vet_secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ 582 s1 = a->y; vet_secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ 583 vet_secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ 584 vet_secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ 585 t = u1; vet_secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ 586 m = s1; vet_secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ 587 vet_secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ 588 vet_secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ 589 vet_secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ 590 vet_secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ 591 /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" 592 * case that Z = z1z2 = 0, and this is special-cased later on). */ 593 degenerate = vet_secp256k1_fe_normalizes_to_zero(&m) & 594 vet_secp256k1_fe_normalizes_to_zero(&rr); 595 /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. 596 * This means either x1 == beta*x2 or beta*x1 == x2, where beta is 597 * a nontrivial cube root of one. In either case, an alternate 598 * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), 599 * so we set R/M equal to this. */ 600 rr_alt = s1; 601 vet_secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ 602 vet_secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ 603 604 vet_secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); 605 vet_secp256k1_fe_cmov(&m_alt, &m, !degenerate); 606 /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. 607 * From here on out Ralt and Malt represent the numerator 608 * and denominator of lambda; R and M represent the explicit 609 * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ 610 vet_secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ 611 vet_secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ 612 /* These two lines use the observation that either M == Malt or M == 0, 613 * so M^3 * Malt is either Malt^4 (which is computed by squaring), or 614 * zero (which is "computed" by cmov). So the cost is one squaring 615 * versus two multiplications. */ 616 vet_secp256k1_fe_sqr(&n, &n); 617 vet_secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ 618 vet_secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ 619 vet_secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ 620 infinity = vet_secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); 621 vet_secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ 622 vet_secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ 623 vet_secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ 624 vet_secp256k1_fe_normalize_weak(&t); 625 r->x = t; /* r->x = Ralt^2-Q (1) */ 626 vet_secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ 627 vet_secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ 628 vet_secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ 629 vet_secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ 630 vet_secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ 631 vet_secp256k1_fe_normalize_weak(&r->y); 632 vet_secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ 633 vet_secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ 634 635 /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */ 636 vet_secp256k1_fe_cmov(&r->x, &b->x, a->infinity); 637 vet_secp256k1_fe_cmov(&r->y, &b->y, a->infinity); 638 vet_secp256k1_fe_cmov(&r->z, &fe_1, a->infinity); 639 r->infinity = infinity; 640 } 641 642 static void vet_secp256k1_gej_rescale(vet_secp256k1_gej *r, const vet_secp256k1_fe *s) { 643 /* Operations: 4 mul, 1 sqr */ 644 vet_secp256k1_fe zz; 645 VERIFY_CHECK(!vet_secp256k1_fe_is_zero(s)); 646 vet_secp256k1_fe_sqr(&zz, s); 647 vet_secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ 648 vet_secp256k1_fe_mul(&r->y, &r->y, &zz); 649 vet_secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ 650 vet_secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ 651 } 652 653 static void vet_secp256k1_ge_to_storage(vet_secp256k1_ge_storage *r, const vet_secp256k1_ge *a) { 654 vet_secp256k1_fe x, y; 655 VERIFY_CHECK(!a->infinity); 656 x = a->x; 657 vet_secp256k1_fe_normalize(&x); 658 y = a->y; 659 vet_secp256k1_fe_normalize(&y); 660 vet_secp256k1_fe_to_storage(&r->x, &x); 661 vet_secp256k1_fe_to_storage(&r->y, &y); 662 } 663 664 static void vet_secp256k1_ge_from_storage(vet_secp256k1_ge *r, const vet_secp256k1_ge_storage *a) { 665 vet_secp256k1_fe_from_storage(&r->x, &a->x); 666 vet_secp256k1_fe_from_storage(&r->y, &a->y); 667 r->infinity = 0; 668 } 669 670 static SECP256K1_INLINE void vet_secp256k1_ge_storage_cmov(vet_secp256k1_ge_storage *r, const vet_secp256k1_ge_storage *a, int flag) { 671 vet_secp256k1_fe_storage_cmov(&r->x, &a->x, flag); 672 vet_secp256k1_fe_storage_cmov(&r->y, &a->y, flag); 673 } 674 675 #ifdef USE_ENDOMORPHISM 676 static void vet_secp256k1_ge_mul_lambda(vet_secp256k1_ge *r, const vet_secp256k1_ge *a) { 677 static const vet_secp256k1_fe beta = SECP256K1_FE_CONST( 678 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, 679 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul 680 ); 681 *r = *a; 682 vet_secp256k1_fe_mul(&r->x, &r->x, &beta); 683 } 684 #endif 685 686 static int vet_secp256k1_gej_has_quad_y_var(const vet_secp256k1_gej *a) { 687 vet_secp256k1_fe yz; 688 689 if (a->infinity) { 690 return 0; 691 } 692 693 /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as 694 * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z 695 is */ 696 vet_secp256k1_fe_mul(&yz, &a->y, &a->z); 697 return vet_secp256k1_fe_is_quad_var(&yz); 698 } 699 700 #endif