github.com/ethereumproject/go-ethereum@v5.5.2+incompatible/crypto/secp256k1/libsecp256k1/src/field_impl.h

/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_FIELD_IMPL_H_
#define _SECP256K1_FIELD_IMPL_H_

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include "util.h"

#if defined(USE_FIELD_10X26)
#include "field_10x26_impl.h"
#elif defined(USE_FIELD_5X52)
#include "field_5x52_impl.h"
#else
#error "Please select field implementation"
#endif

SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) {
    secp256k1_fe na;
    secp256k1_fe_negate(&na, a, 1);
    secp256k1_fe_add(&na, b);
    return secp256k1_fe_normalizes_to_zero_var(&na);
}

static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a) {
    secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
     *  { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    secp256k1_fe_sqr(&x2, a);
    secp256k1_fe_mul(&x2, &x2, a);

    secp256k1_fe_sqr(&x3, &x2);
    secp256k1_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x6, &x6);
    }
    secp256k1_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x9, &x9);
    }
    secp256k1_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        secp256k1_fe_sqr(&x11, &x11);
    }
    secp256k1_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        secp256k1_fe_sqr(&x22, &x22);
    }
    secp256k1_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        secp256k1_fe_sqr(&x44, &x44);
    }
    secp256k1_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        secp256k1_fe_sqr(&x88, &x88);
    }
    secp256k1_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        secp256k1_fe_sqr(&x176, &x176);
    }
    secp256k1_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        secp256k1_fe_sqr(&x220, &x220);
    }
    secp256k1_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x223, &x223);
    }
    secp256k1_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(&t1, &t1, &x22);
    for (j=0; j<6; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(&t1, &t1, &x2);
    secp256k1_fe_sqr(&t1, &t1);
    secp256k1_fe_sqr(r, &t1);

    /* Check that a square root was actually calculated */

    secp256k1_fe_sqr(&t1, r);
    return secp256k1_fe_equal_var(&t1, a);
}
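/* Illustrative sketch, not part of the original file: for any prime p with
 * p % 4 == 3 (as the secp256k1 field prime is), a square root of a quadratic
 * residue a is simply a^((p+1)/4) mod p, which is exactly the exponent the
 * addition chain above evaluates. The same idea is shown below over a small
 * prime with plain uint64_t arithmetic; demo_modexp and demo_sqrt_3mod4 are
 * hypothetical helper names, not libsecp256k1 API. */
#include <stdint.h>

static uint64_t demo_modexp(uint64_t base, uint64_t exp, uint64_t m) {
    /* Square-and-multiply; products stay below 2^64 as long as m < 2^32. */
    uint64_t result = 1 % m;
    base %= m;
    while (exp > 0) {
        if (exp & 1) {
            result = (result * base) % m;
        }
        base = (base * base) % m;
        exp >>= 1;
    }
    return result;
}

static uint64_t demo_sqrt_3mod4(uint64_t a, uint64_t p) {
    /* Candidate square root; as in secp256k1_fe_sqrt_var, the caller still has
     * to square the result and compare it with a, since a may be a non-residue.
     * Example: demo_sqrt_3mod4(2, 23) == 18, and (18 * 18) % 23 == 2. */
    return demo_modexp(a, (p + 1) / 4, p);
}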
static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
    secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    secp256k1_fe_sqr(&x2, a);
    secp256k1_fe_mul(&x2, &x2, a);

    secp256k1_fe_sqr(&x3, &x2);
    secp256k1_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x6, &x6);
    }
    secp256k1_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x9, &x9);
    }
    secp256k1_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        secp256k1_fe_sqr(&x11, &x11);
    }
    secp256k1_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        secp256k1_fe_sqr(&x22, &x22);
    }
    secp256k1_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        secp256k1_fe_sqr(&x44, &x44);
    }
    secp256k1_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        secp256k1_fe_sqr(&x88, &x88);
    }
    secp256k1_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        secp256k1_fe_sqr(&x176, &x176);
    }
    secp256k1_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        secp256k1_fe_sqr(&x220, &x220);
    }
    secp256k1_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&x223, &x223);
    }
    secp256k1_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(&t1, &t1, &x22);
    for (j=0; j<5; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(&t1, &t1, a);
    for (j=0; j<3; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(&t1, &t1, &x2);
    for (j=0; j<2; j++) {
        secp256k1_fe_sqr(&t1, &t1);
    }
    secp256k1_fe_mul(r, a, &t1);
}
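/* Illustrative sketch, not part of the original file: secp256k1_fe_inv raises
 * a to the power p - 2, which by Fermat's little theorem (a^(p-1) == 1 mod p
 * for nonzero a) yields the modular inverse. The same computation over a small
 * prime, reusing the hypothetical demo_modexp helper from the sketch above. */
static uint64_t demo_modinv_fermat(uint64_t a, uint64_t p) {
    /* Assumes p is prime and a % p != 0; then (demo_modinv_fermat(a, p) * a) % p == 1.
     * Example: demo_modinv_fermat(5, 23) == 14, and (14 * 5) % 23 == 1. */
    return demo_modexp(a, p - 2, p);
}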
static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
    secp256k1_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
    secp256k1_num n, m;
    static const secp256k1_fe negone = SECP256K1_FE_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
    );
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };
    unsigned char b[32];
    secp256k1_fe c = *a;
    secp256k1_fe_normalize_var(&c);
    secp256k1_fe_get_b32(b, &c);
    secp256k1_num_set_bin(&n, b, 32);
    secp256k1_num_set_bin(&m, prime, 32);
    secp256k1_num_mod_inverse(&n, &n, &m);
    secp256k1_num_get_bin(b, 32, &n);
    VERIFY_CHECK(secp256k1_fe_set_b32(r, b));
    /* Verify the result is the (unique) valid inverse using non-GMP code. */
    secp256k1_fe_mul(&c, &c, r);
    secp256k1_fe_add(&c, &negone);
    CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}

static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a) {
    secp256k1_fe u;
    size_t i;
    if (len < 1) {
        return;
    }

    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    r[0] = a[0];

    i = 0;
    while (++i < len) {
        secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]);
    }

    secp256k1_fe_inv_var(&u, &r[--i]);

    while (i > 0) {
        size_t j = i--;
        secp256k1_fe_mul(&r[j], &r[i], &u);
        secp256k1_fe_mul(&u, &u, &a[j]);
    }

    r[0] = u;
}

#endif
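/* Illustrative sketch, not part of the original file: secp256k1_fe_inv_all_var
 * above is Montgomery's batch-inversion trick. It builds the running products
 * a[0], a[0]*a[1], ..., inverts only the final product, and then walks
 * backwards, peeling off one inverse per element, so len inverses cost a
 * single inversion plus roughly 3*(len-1) multiplications. The same structure
 * over a small prime, reusing the hypothetical demo_modinv_fermat helper
 * above; like the original, it assumes r and a do not overlap. */
#include <stddef.h>

static void demo_modinv_batch(size_t len, uint64_t *r, const uint64_t *a, uint64_t p) {
    uint64_t u;
    size_t i;
    if (len < 1) {
        return;
    }

    r[0] = a[0] % p;
    for (i = 1; i < len; i++) {
        r[i] = (r[i - 1] * a[i]) % p;       /* r[i] = a[0] * ... * a[i] */
    }

    u = demo_modinv_fermat(r[len - 1], p);  /* one inversion for the whole batch */

    for (i = len - 1; i > 0; i--) {
        r[i] = (r[i - 1] * u) % p;          /* r[i] becomes a[i]^(-1) */
        u = (u * (a[i] % p)) % p;           /* u becomes (a[0]*...*a[i-1])^(-1) */
    }
    r[0] = u;
}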