/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_FIELD_IMPL_H_
#define _SECP256K1_FIELD_IMPL_H_

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include "util.h"

/* Select the limb representation of field elements at build time:
 * 10x26 (portable 32-bit) or 5x52 (requires a 64x64->128 bit multiply). */
#if defined(USE_FIELD_10X26)
#include "field_10x26_impl.h"
#elif defined(USE_FIELD_5X52)
#include "field_5x52_impl.h"
#else
#error "Please select field implementation"
#endif

/** Return 1 if a == b (mod p), 0 otherwise.
 *  Computed as b + (-a), testing whether the sum normalizes to zero. */
SECP256K1_INLINE static int vet_secp256k1_fe_equal(const vet_secp256k1_fe *a, const vet_secp256k1_fe *b) {
    vet_secp256k1_fe na;
    vet_secp256k1_fe_negate(&na, a, 1);
    vet_secp256k1_fe_add(&na, b);
    return vet_secp256k1_fe_normalizes_to_zero(&na);
}

/** Same contract as vet_secp256k1_fe_equal, but using the variable-time
 *  normalization check (not safe on secret data). */
SECP256K1_INLINE static int vet_secp256k1_fe_equal_var(const vet_secp256k1_fe *a, const vet_secp256k1_fe *b) {
    vet_secp256k1_fe na;
    vet_secp256k1_fe_negate(&na, a, 1);
    vet_secp256k1_fe_add(&na, b);
    return vet_secp256k1_fe_normalizes_to_zero_var(&na);
}

/** Compute a square root of a (mod p) into r, returning 1 if a was a square
 *  and 0 otherwise (in which case r holds sqrt(-a)).
 *  Throughout, the local variable xN holds a^(2^N - 1). */
static int vet_secp256k1_fe_sqrt(vet_secp256k1_fe *r, const vet_secp256k1_fe *a) {
    /** Given that p is congruent to 3 mod 4, we can compute the square root of
     *  a mod p as the (p+1)/4'th power of a.
     *
     *  As (p+1)/4 is an even number, it will have the same result for a and for
     *  (-a). Only one of these two numbers actually has a square root however,
     *  so we test at the end by squaring and comparing to the input.
     *  Also because (p+1)/4 is an even number, the computed square root is
     *  itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
     */
    vet_secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
     *  { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    vet_secp256k1_fe_sqr(&x2, a);
    vet_secp256k1_fe_mul(&x2, &x2, a);

    vet_secp256k1_fe_sqr(&x3, &x2);
    vet_secp256k1_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x6, &x6);
    }
    vet_secp256k1_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x9, &x9);
    }
    vet_secp256k1_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        vet_secp256k1_fe_sqr(&x11, &x11);
    }
    vet_secp256k1_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        vet_secp256k1_fe_sqr(&x22, &x22);
    }
    vet_secp256k1_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        vet_secp256k1_fe_sqr(&x44, &x44);
    }
    vet_secp256k1_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        vet_secp256k1_fe_sqr(&x88, &x88);
    }
    vet_secp256k1_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        vet_secp256k1_fe_sqr(&x176, &x176);
    }
    vet_secp256k1_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        vet_secp256k1_fe_sqr(&x220, &x220);
    }
    vet_secp256k1_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x223, &x223);
    }
    vet_secp256k1_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(&t1, &t1, &x22);
    for (j=0; j<6; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(&t1, &t1, &x2);
    vet_secp256k1_fe_sqr(&t1, &t1);
    vet_secp256k1_fe_sqr(r, &t1);

    /* Check that a square root was actually calculated */

    vet_secp256k1_fe_sqr(&t1, r);
    return vet_secp256k1_fe_equal(&t1, a);
}

/** Set r to the modular inverse of a (mod p), computed as a^(p-2) via the
 *  addition chain below (Fermat's little theorem). Constant time in a.
 *  Throughout, the local variable xN holds a^(2^N - 1). */
static void vet_secp256k1_fe_inv(vet_secp256k1_fe *r, const vet_secp256k1_fe *a) {
    vet_secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    vet_secp256k1_fe_sqr(&x2, a);
    vet_secp256k1_fe_mul(&x2, &x2, a);

    vet_secp256k1_fe_sqr(&x3, &x2);
    vet_secp256k1_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x6, &x6);
    }
    vet_secp256k1_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x9, &x9);
    }
    vet_secp256k1_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        vet_secp256k1_fe_sqr(&x11, &x11);
    }
    vet_secp256k1_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        vet_secp256k1_fe_sqr(&x22, &x22);
    }
    vet_secp256k1_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        vet_secp256k1_fe_sqr(&x44, &x44);
    }
    vet_secp256k1_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        vet_secp256k1_fe_sqr(&x88, &x88);
    }
    vet_secp256k1_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        vet_secp256k1_fe_sqr(&x176, &x176);
    }
    vet_secp256k1_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        vet_secp256k1_fe_sqr(&x220, &x220);
    }
    vet_secp256k1_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&x223, &x223);
    }
    vet_secp256k1_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(&t1, &t1, &x22);
    for (j=0; j<5; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(&t1, &t1, a);
    for (j=0; j<3; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(&t1, &t1, &x2);
    for (j=0; j<2; j++) {
        vet_secp256k1_fe_sqr(&t1, &t1);
    }
    vet_secp256k1_fe_mul(r, a, &t1);
}

/** Variable-time modular inverse: either delegates to the constant-time
 *  vet_secp256k1_fe_inv, or (with USE_FIELD_INV_NUM) uses the bignum
 *  module's modular inverse and cross-checks the result. */
static void vet_secp256k1_fe_inv_var(vet_secp256k1_fe *r, const vet_secp256k1_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
    vet_secp256k1_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
    vet_secp256k1_num n, m;
    /* -1 mod p, used below to verify that a * a^-1 == 1. */
    static const vet_secp256k1_fe negone = SECP256K1_FE_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
    );
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };
    unsigned char b[32];
    int res;
    vet_secp256k1_fe c = *a;
    vet_secp256k1_fe_normalize_var(&c);
    vet_secp256k1_fe_get_b32(b, &c);
    vet_secp256k1_num_set_bin(&n, b, 32);
    vet_secp256k1_num_set_bin(&m, prime, 32);
    vet_secp256k1_num_mod_inverse(&n, &n, &m);
    vet_secp256k1_num_get_bin(b, 32, &n);
    res = vet_secp256k1_fe_set_b32(r, b);
    (void)res;
    VERIFY_CHECK(res);
    /* Verify the result is the (unique) valid inverse using non-GMP code. */
    vet_secp256k1_fe_mul(&c, &c, r);
    vet_secp256k1_fe_add(&c, &negone);
    CHECK(vet_secp256k1_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}

/** Invert len field elements a[0..len-1] into r[0..len-1] using batch
 *  inversion: build prefix products, perform a single field inversion, then
 *  unwind it into the individual inverses. The inputs and outputs must not
 *  overlap (checked below). No-op when len == 0. */
static void vet_secp256k1_fe_inv_all_var(vet_secp256k1_fe *r, const vet_secp256k1_fe *a, size_t len) {
    vet_secp256k1_fe u;
    size_t i;
    if (len < 1) {
        return;
    }

    /* r and a must be non-overlapping arrays of length len. */
    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    r[0] = a[0];

    /* r[i] = a[0] * a[1] * ... * a[i] (prefix products). */
    i = 0;
    while (++i < len) {
        vet_secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]);
    }

    /* One inversion of the total product; u = (a[0]*...*a[len-1])^-1. */
    vet_secp256k1_fe_inv_var(&u, &r[--i]);

    /* Walk backwards, peeling one factor off u per step:
     * r[j] = u * (a[0]*...*a[j-1]) = a[j]^-1, then fold a[j] into u. */
    while (i > 0) {
        size_t j = i--;
        vet_secp256k1_fe_mul(&r[j], &r[i], &u);
        vet_secp256k1_fe_mul(&u, &u, &a[j]);
    }

    r[0] = u;
}

/** Return 1 if a is a quadratic residue mod p (a == 0 counts as one), 0
 *  otherwise. Uses the Jacobi symbol when the bignum module is available,
 *  otherwise falls back to attempting a square root. Variable time. */
static int vet_secp256k1_fe_is_quad_var(const vet_secp256k1_fe *a) {
#ifndef USE_NUM_NONE
    unsigned char b[32];
    vet_secp256k1_num n;
    vet_secp256k1_num m;
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };

    vet_secp256k1_fe c = *a;
    vet_secp256k1_fe_normalize_var(&c);
    vet_secp256k1_fe_get_b32(b, &c);
    vet_secp256k1_num_set_bin(&n, b, 32);
    vet_secp256k1_num_set_bin(&m, prime, 32);
    /* Jacobi symbol is 1 for residues, -1 for non-residues, 0 for a == 0;
     * >= 0 therefore treats zero as a square. */
    return vet_secp256k1_num_jacobi(&n, &m) >= 0;
#else
    vet_secp256k1_fe r;
    return vet_secp256k1_fe_sqrt(&r, a);
#endif
}

#endif /* _SECP256K1_FIELD_IMPL_H_ */