github.com/hellobchain/newcryptosm@v0.0.0-20221019060107-edb949a317e9/sm2/sm2.go (about) 1 //go:build !amd64 2 // +build !amd64 3 4 /* 5 Copyright Suzhou Tongji Fintech Research Institute 2017 All Rights Reserved. 6 Licensed under the Apache License, Version 2.0 (the "License"); 7 you may not use this file except in compliance with the License. 8 You may obtain a copy of the License at 9 10 http://www.apache.org/licenses/LICENSE-2.0 11 12 Unless required by applicable law or agreed to in writing, software 13 distributed under the License is distributed on an "AS IS" BASIS, 14 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 See the License for the specific language governing permissions and 16 limitations under the License. 17 */ 18 package sm2 19 20 import ( 21 "crypto/elliptic" 22 "math/big" 23 "sync" 24 ) 25 26 type sm2Curve struct { 27 RInverse *big.Int 28 *elliptic.CurveParams 29 a, b, gx, gy sm2P256FieldElement 30 } 31 32 var initonce sync.Once 33 var sm2P256 sm2Curve 34 35 type sm2P256FieldElement [9]uint32 36 type sm2P256LargeFieldElement [17]uint64 37 38 const ( 39 bottom28Bits = 0xFFFFFFF 40 bottom29Bits = 0x1FFFFFFF 41 ) 42 43 func initP256Sm2() { 44 sm2P256.CurveParams = &elliptic.CurveParams{Name: "SM2"} // sm2 45 A, _ := new(big.Int).SetString("FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000FFFFFFFFFFFFFFFC", 16) 46 //SM2椭 椭 圆 曲 线 公 钥 密 码 算 法 推 荐 曲 线 参 数 47 sm2P256.P, _ = new(big.Int).SetString("FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000FFFFFFFFFFFFFFFF", 16) 48 sm2P256.N, _ = new(big.Int).SetString("FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFF7203DF6B21C6052B53BBF40939D54123", 16) 49 sm2P256.B, _ = new(big.Int).SetString("28E9FA9E9D9F5E344D5A9E4BCF6509A7F39789F515AB8F92DDBCBD414D940E93", 16) 50 sm2P256.Gx, _ = new(big.Int).SetString("32C4AE2C1F1981195F9904466A39C9948FE30BBFF2660BE1715A4589334C74C7", 16) 51 sm2P256.Gy, _ = new(big.Int).SetString("BC3736A2F4F6779C59BDCEE36B692153D0A9877CC62A474002DF32E52139F0A0", 16) 52 
sm2P256.RInverse, _ = new(big.Int).SetString("7ffffffd80000002fffffffe000000017ffffffe800000037ffffffc80000002", 16) 53 sm2P256.BitSize = 256 54 sm2P256FromBig(&sm2P256.a, A) 55 sm2P256FromBig(&sm2P256.gx, sm2P256.Gx) 56 sm2P256FromBig(&sm2P256.gy, sm2P256.Gy) 57 sm2P256FromBig(&sm2P256.b, sm2P256.B) 58 } 59 60 func SM2() elliptic.Curve { 61 initonce.Do(initP256Sm2) 62 return sm2P256 63 } 64 65 func (curve sm2Curve) Params() *elliptic.CurveParams { 66 return sm2P256.CurveParams 67 } 68 69 // IsOnCurve y^2 = x^3 + ax + b 70 func (curve sm2Curve) IsOnCurve(X, Y *big.Int) bool { 71 var a, x, y, y2, x3 sm2P256FieldElement 72 73 sm2P256FromBig(&x, X) 74 sm2P256FromBig(&y, Y) 75 76 sm2P256Square(&x3, &x) // x3 = x ^ 2 77 sm2P256Mul(&x3, &x3, &x) // x3 = x ^ 2 * x 78 sm2P256Mul(&a, &curve.a, &x) // a = a * x 79 sm2P256Add(&x3, &x3, &a) 80 sm2P256Add(&x3, &x3, &curve.b) 81 82 sm2P256Square(&y2, &y) // y2 = y ^ 2 83 return sm2P256ToBig(&x3).Cmp(sm2P256ToBig(&y2)) == 0 84 } 85 86 func zForAffine(x, y *big.Int) *big.Int { 87 z := new(big.Int) 88 if x.Sign() != 0 || y.Sign() != 0 { 89 z.SetInt64(1) 90 } 91 return z 92 } 93 94 func (curve sm2Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { 95 var X1, Y1, Z1, X2, Y2, Z2, X3, Y3, Z3 sm2P256FieldElement 96 97 z1 := zForAffine(x1, y1) 98 z2 := zForAffine(x2, y2) 99 sm2P256FromBig(&X1, x1) 100 sm2P256FromBig(&Y1, y1) 101 sm2P256FromBig(&Z1, z1) 102 sm2P256FromBig(&X2, x2) 103 sm2P256FromBig(&Y2, y2) 104 sm2P256FromBig(&Z2, z2) 105 sm2P256PointAdd(&X1, &Y1, &Z1, &X2, &Y2, &Z2, &X3, &Y3, &Z3) 106 return sm2P256ToAffine(&X3, &Y3, &Z3) 107 } 108 109 func (curve sm2Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { 110 var X1, Y1, Z1 sm2P256FieldElement 111 112 z1 := zForAffine(x1, y1) 113 sm2P256FromBig(&X1, x1) 114 sm2P256FromBig(&Y1, y1) 115 sm2P256FromBig(&Z1, z1) 116 sm2P256PointDouble(&X1, &Y1, &Z1, &X1, &Y1, &Z1) 117 return sm2P256ToAffine(&X1, &Y1, &Z1) 118 } 119 120 func (curve sm2Curve) ScalarMult(x1, y1 *big.Int, 
k []byte) (*big.Int, *big.Int) { 121 var X, Y, Z, X1, Y1 sm2P256FieldElement 122 sm2P256FromBig(&X1, x1) 123 sm2P256FromBig(&Y1, y1) 124 scalar := sm2GenrateWNaf(k) 125 scalarReversed := WNafReversed(scalar) 126 sm2P256ScalarMult(&X, &Y, &Z, &X1, &Y1, scalarReversed) 127 return sm2P256ToAffine(&X, &Y, &Z) 128 } 129 130 func (curve sm2Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { 131 var scalarReversed [32]byte 132 var X, Y, Z sm2P256FieldElement 133 134 sm2P256GetScalar(&scalarReversed, k) 135 sm2P256ScalarBaseMult(&X, &Y, &Z, &scalarReversed) 136 return sm2P256ToAffine(&X, &Y, &Z) 137 } 138 139 var sm2P256Precomputed = [9 * 2 * 15 * 2]uint32{ 140 0x830053d, 0x328990f, 0x6c04fe1, 0xc0f72e5, 0x1e19f3c, 0x666b093, 0x175a87b, 0xec38276, 0x222cf4b, 141 0x185a1bba, 0x354e593, 0x1295fac1, 0xf2bc469, 0x47c60fa, 0xc19b8a9, 0xf63533e, 0x903ae6b, 0xc79acba, 142 0x15b061a4, 0x33e020b, 0xdffb34b, 0xfcf2c8, 0x16582e08, 0x262f203, 0xfb34381, 0xa55452, 0x604f0ff, 143 0x41f1f90, 0xd64ced2, 0xee377bf, 0x75f05f0, 0x189467ae, 0xe2244e, 0x1e7700e8, 0x3fbc464, 0x9612d2e, 144 0x1341b3b8, 0xee84e23, 0x1edfa5b4, 0x14e6030, 0x19e87be9, 0x92f533c, 0x1665d96c, 0x226653e, 0xa238d3e, 145 0xf5c62c, 0x95bb7a, 0x1f0e5a41, 0x28789c3, 0x1f251d23, 0x8726609, 0xe918910, 0x8096848, 0xf63d028, 146 0x152296a1, 0x9f561a8, 0x14d376fb, 0x898788a, 0x61a95fb, 0xa59466d, 0x159a003d, 0x1ad1698, 0x93cca08, 147 0x1b314662, 0x706e006, 0x11ce1e30, 0x97b710, 0x172fbc0d, 0x8f50158, 0x11c7ffe7, 0xd182cce, 0xc6ad9e8, 148 0x12ea31b2, 0xc4e4f38, 0x175b0d96, 0xec06337, 0x75a9c12, 0xb001fdf, 0x93e82f5, 0x34607de, 0xb8035ed, 149 0x17f97924, 0x75cf9e6, 0xdceaedd, 0x2529924, 0x1a10c5ff, 0xb1a54dc, 0x19464d8, 0x2d1997, 0xde6a110, 150 0x1e276ee5, 0x95c510c, 0x1aca7c7a, 0xfe48aca, 0x121ad4d9, 0xe4132c6, 0x8239b9d, 0x40ea9cd, 0x816c7b, 151 0x632d7a4, 0xa679813, 0x5911fcf, 0x82b0f7c, 0x57b0ad5, 0xbef65, 0xd541365, 0x7f9921f, 0xc62e7a, 152 0x3f4b32d, 0x58e50e1, 0x6427aed, 0xdcdda67, 0xe8c2d3e, 0x6aa54a4, 0x18df4c35, 
0x49a6a8e, 0x3cd3d0c, 153 0xd7adf2, 0xcbca97, 0x1bda5f2d, 0x3258579, 0x606b1e6, 0x6fc1b5b, 0x1ac27317, 0x503ca16, 0xa677435, 154 0x57bc73, 0x3992a42, 0xbab987b, 0xfab25eb, 0x128912a4, 0x90a1dc4, 0x1402d591, 0x9ffbcfc, 0xaa48856, 155 0x7a7c2dc, 0xcefd08a, 0x1b29bda6, 0xa785641, 0x16462d8c, 0x76241b7, 0x79b6c3b, 0x204ae18, 0xf41212b, 156 0x1f567a4d, 0xd6ce6db, 0xedf1784, 0x111df34, 0x85d7955, 0x55fc189, 0x1b7ae265, 0xf9281ac, 0xded7740, 157 0xf19468b, 0x83763bb, 0x8ff7234, 0x3da7df8, 0x9590ac3, 0xdc96f2a, 0x16e44896, 0x7931009, 0x99d5acc, 158 0x10f7b842, 0xaef5e84, 0xc0310d7, 0xdebac2c, 0x2a7b137, 0x4342344, 0x19633649, 0x3a10624, 0x4b4cb56, 159 0x1d809c59, 0xac007f, 0x1f0f4bcd, 0xa1ab06e, 0xc5042cf, 0x82c0c77, 0x76c7563, 0x22c30f3, 0x3bf1568, 160 0x7a895be, 0xfcca554, 0x12e90e4c, 0x7b4ab5f, 0x13aeb76b, 0x5887e2c, 0x1d7fe1e3, 0x908c8e3, 0x95800ee, 161 0xb36bd54, 0xf08905d, 0x4e73ae8, 0xf5a7e48, 0xa67cb0, 0x50e1067, 0x1b944a0a, 0xf29c83a, 0xb23cfb9, 162 0xbe1db1, 0x54de6e8, 0xd4707f2, 0x8ebcc2d, 0x2c77056, 0x1568ce4, 0x15fcc849, 0x4069712, 0xe2ed85f, 163 0x2c5ff09, 0x42a6929, 0x628e7ea, 0xbd5b355, 0xaf0bd79, 0xaa03699, 0xdb99816, 0x4379cef, 0x81d57b, 164 0x11237f01, 0xe2a820b, 0xfd53b95, 0x6beb5ee, 0x1aeb790c, 0xe470d53, 0x2c2cfee, 0x1c1d8d8, 0xa520fc4, 165 0x1518e034, 0xa584dd4, 0x29e572b, 0xd4594fc, 0x141a8f6f, 0x8dfccf3, 0x5d20ba3, 0x2eb60c3, 0x9f16eb0, 166 0x11cec356, 0xf039f84, 0x1b0990c1, 0xc91e526, 0x10b65bae, 0xf0616e8, 0x173fa3ff, 0xec8ccf9, 0xbe32790, 167 0x11da3e79, 0xe2f35c7, 0x908875c, 0xdacf7bd, 0x538c165, 0x8d1487f, 0x7c31aed, 0x21af228, 0x7e1689d, 168 0xdfc23ca, 0x24f15dc, 0x25ef3c4, 0x35248cd, 0x99a0f43, 0xa4b6ecc, 0xd066b3, 0x2481152, 0x37a7688, 169 0x15a444b6, 0xb62300c, 0x4b841b, 0xa655e79, 0xd53226d, 0xbeb348a, 0x127f3c2, 0xb989247, 0x71a277d, 170 0x19e9dfcb, 0xb8f92d0, 0xe2d226c, 0x390a8b0, 0x183cc462, 0x7bd8167, 0x1f32a552, 0x5e02db4, 0xa146ee9, 171 0x1a003957, 0x1c95f61, 0x1eeec155, 0x26f811f, 0xf9596ba, 0x3082bfb, 0x96df083, 0x3e3a289, 
0x7e2d8be, 172 0x157a63e0, 0x99b8941, 0x1da7d345, 0xcc6cd0, 0x10beed9a, 0x48e83c0, 0x13aa2e25, 0x7cad710, 0x4029988, 173 0x13dfa9dd, 0xb94f884, 0x1f4adfef, 0xb88543, 0x16f5f8dc, 0xa6a67f4, 0x14e274e2, 0x5e56cf4, 0x2f24ef, 174 0x1e9ef967, 0xfe09bad, 0xfe079b3, 0xcc0ae9e, 0xb3edf6d, 0x3e961bc, 0x130d7831, 0x31043d6, 0xba986f9, 175 0x1d28055, 0x65240ca, 0x4971fa3, 0x81b17f8, 0x11ec34a5, 0x8366ddc, 0x1471809, 0xfa5f1c6, 0xc911e15, 176 0x8849491, 0xcf4c2e2, 0x14471b91, 0x39f75be, 0x445c21e, 0xf1585e9, 0x72cc11f, 0x4c79f0c, 0xe5522e1, 177 0x1874c1ee, 0x4444211, 0x7914884, 0x3d1b133, 0x25ba3c, 0x4194f65, 0x1c0457ef, 0xac4899d, 0xe1fa66c, 178 0x130a7918, 0x9b8d312, 0x4b1c5c8, 0x61ccac3, 0x18c8aa6f, 0xe93cb0a, 0xdccb12c, 0xde10825, 0x969737d, 179 0xf58c0c3, 0x7cee6a9, 0xc2c329a, 0xc7f9ed9, 0x107b3981, 0x696a40e, 0x152847ff, 0x4d88754, 0xb141f47, 180 0x5a16ffe, 0x3a7870a, 0x18667659, 0x3b72b03, 0xb1c9435, 0x9285394, 0xa00005a, 0x37506c, 0x2edc0bb, 181 0x19afe392, 0xeb39cac, 0x177ef286, 0xdf87197, 0x19f844ed, 0x31fe8, 0x15f9bfd, 0x80dbec, 0x342e96e, 182 0x497aced, 0xe88e909, 0x1f5fa9ba, 0x530a6ee, 0x1ef4e3f1, 0x69ffd12, 0x583006d, 0x2ecc9b1, 0x362db70, 183 0x18c7bdc5, 0xf4bb3c5, 0x1c90b957, 0xf067c09, 0x9768f2b, 0xf73566a, 0x1939a900, 0x198c38a, 0x202a2a1, 184 0x4bbf5a6, 0x4e265bc, 0x1f44b6e7, 0x185ca49, 0xa39e81b, 0x24aff5b, 0x4acc9c2, 0x638bdd3, 0xb65b2a8, 185 0x6def8be, 0xb94537a, 0x10b81dee, 0xe00ec55, 0x2f2cdf7, 0xc20622d, 0x2d20f36, 0xe03c8c9, 0x898ea76, 186 0x8e3921b, 0x8905bff, 0x1e94b6c8, 0xee7ad86, 0x154797f2, 0xa620863, 0x3fbd0d9, 0x1f3caab, 0x30c24bd, 187 0x19d3892f, 0x59c17a2, 0x1ab4b0ae, 0xf8714ee, 0x90c4098, 0xa9c800d, 0x1910236b, 0xea808d3, 0x9ae2f31, 188 0x1a15ad64, 0xa48c8d1, 0x184635a4, 0xb725ef1, 0x11921dcc, 0x3f866df, 0x16c27568, 0xbdf580a, 0xb08f55c, 189 0x186ee1c, 0xb1627fa, 0x34e82f6, 0x933837e, 0xf311be5, 0xfedb03b, 0x167f72cd, 0xa5469c0, 0x9c82531, 190 0xb92a24b, 0x14fdc8b, 0x141980d1, 0xbdc3a49, 0x7e02bb1, 0xaf4e6dd, 0x106d99e1, 0xd4616fc, 
0x93c2717, 191 0x1c0a0507, 0xc6d5fed, 0x9a03d8b, 0xa1d22b0, 0x127853e3, 0xc4ac6b8, 0x1a048cf7, 0x9afb72c, 0x65d485d, 192 0x72d5998, 0xe9fa744, 0xe49e82c, 0x253cf80, 0x5f777ce, 0xa3799a5, 0x17270cbb, 0xc1d1ef0, 0xdf74977, 193 0x114cb859, 0xfa8e037, 0xb8f3fe5, 0xc734cc6, 0x70d3d61, 0xeadac62, 0x12093dd0, 0x9add67d, 0x87200d6, 194 0x175bcbb, 0xb29b49f, 0x1806b79c, 0x12fb61f, 0x170b3a10, 0x3aaf1cf, 0xa224085, 0x79d26af, 0x97759e2, 195 0x92e19f1, 0xb32714d, 0x1f00d9f1, 0xc728619, 0x9e6f627, 0xe745e24, 0x18ea4ace, 0xfc60a41, 0x125f5b2, 196 0xc3cf512, 0x39ed486, 0xf4d15fa, 0xf9167fd, 0x1c1f5dd5, 0xc21a53e, 0x1897930, 0x957a112, 0x21059a0, 197 0x1f9e3ddc, 0xa4dfced, 0x8427f6f, 0x726fbe7, 0x1ea658f8, 0x2fdcd4c, 0x17e9b66f, 0xb2e7c2e, 0x39923bf, 198 0x1bae104, 0x3973ce5, 0xc6f264c, 0x3511b84, 0x124195d7, 0x11996bd, 0x20be23d, 0xdc437c4, 0x4b4f16b, 199 0x11902a0, 0x6c29cc9, 0x1d5ffbe6, 0xdb0b4c7, 0x10144c14, 0x2f2b719, 0x301189, 0x2343336, 0xa0bf2ac, 200 } 201 202 func sm2P256GetScalar(b *[32]byte, a []byte) { 203 var scalarBytes []byte 204 205 n := new(big.Int).SetBytes(a) 206 if n.Cmp(sm2P256.N) >= 0 { 207 n.Mod(n, sm2P256.N) 208 scalarBytes = n.Bytes() 209 } else { 210 scalarBytes = a 211 } 212 for i, v := range scalarBytes { 213 b[len(scalarBytes)-(1+i)] = v 214 } 215 } 216 217 func sm2P256PointAddMixed(xOut, yOut, zOut, x1, y1, z1, x2, y2 *sm2P256FieldElement) { 218 var z1z1, z1z1z1, s2, u2, h, i, j, r, rr, v, tmp sm2P256FieldElement 219 220 sm2P256Square(&z1z1, z1) 221 sm2P256Add(&tmp, z1, z1) 222 223 sm2P256Mul(&u2, x2, &z1z1) 224 sm2P256Mul(&z1z1z1, z1, &z1z1) 225 sm2P256Mul(&s2, y2, &z1z1z1) 226 sm2P256Sub(&h, &u2, x1) 227 sm2P256Add(&i, &h, &h) 228 sm2P256Square(&i, &i) 229 sm2P256Mul(&j, &h, &i) 230 sm2P256Sub(&r, &s2, y1) 231 sm2P256Add(&r, &r, &r) 232 sm2P256Mul(&v, x1, &i) 233 234 sm2P256Mul(zOut, &tmp, &h) 235 sm2P256Square(&rr, &r) 236 sm2P256Sub(xOut, &rr, &j) 237 sm2P256Sub(xOut, xOut, &v) 238 sm2P256Sub(xOut, xOut, &v) 239 240 sm2P256Sub(&tmp, &v, xOut) 241 
sm2P256Mul(yOut, &tmp, &r) 242 sm2P256Mul(&tmp, y1, &j) 243 sm2P256Sub(yOut, yOut, &tmp) 244 sm2P256Sub(yOut, yOut, &tmp) 245 } 246 247 // sm2P256CopyConditional sets out=in if mask = 0xffffffff in constant time. 248 // 249 // On entry: mask is either 0 or 0xffffffff. 250 func sm2P256CopyConditional(out, in *sm2P256FieldElement, mask uint32) { 251 for i := 0; i < 9; i++ { 252 tmp := mask & (in[i] ^ out[i]) 253 out[i] ^= tmp 254 } 255 } 256 257 // sm2P256SelectAffinePoint sets {out_x,out_y} to the index'th entry of table. 258 // On entry: index < 16, table[0] must be zero. 259 func sm2P256SelectAffinePoint(xOut, yOut *sm2P256FieldElement, table []uint32, index uint32) { 260 for i := range xOut { 261 xOut[i] = 0 262 } 263 for i := range yOut { 264 yOut[i] = 0 265 } 266 267 for i := uint32(1); i < 16; i++ { 268 mask := i ^ index 269 mask |= mask >> 2 270 mask |= mask >> 1 271 mask &= 1 272 mask-- 273 for j := range xOut { 274 xOut[j] |= table[0] & mask 275 table = table[1:] 276 } 277 for j := range yOut { 278 yOut[j] |= table[0] & mask 279 table = table[1:] 280 } 281 } 282 } 283 284 // sm2P256SelectJacobianPoint sets {out_x,out_y,out_z} to the index'th entry of 285 // table. 286 // On entry: index < 16, table[0] must be zero. 287 func sm2P256SelectJacobianPoint(xOut, yOut, zOut *sm2P256FieldElement, table *[16][3]sm2P256FieldElement, index uint32) { 288 for i := range xOut { 289 xOut[i] = 0 290 } 291 for i := range yOut { 292 yOut[i] = 0 293 } 294 for i := range zOut { 295 zOut[i] = 0 296 } 297 298 // The implicit value at index 0 is all zero. We don't need to perform that 299 // iteration of the loop because we already set out_* to zero. 
300 for i := uint32(1); i < 16; i++ { 301 mask := i ^ index 302 mask |= mask >> 2 303 mask |= mask >> 1 304 mask &= 1 305 mask-- 306 for j := range xOut { 307 xOut[j] |= table[i][0][j] & mask 308 } 309 for j := range yOut { 310 yOut[j] |= table[i][1][j] & mask 311 } 312 for j := range zOut { 313 zOut[j] |= table[i][2][j] & mask 314 } 315 } 316 } 317 318 // sm2P256GetBit returns the bit'th bit of scalar. 319 func sm2P256GetBit(scalar *[32]uint8, bit uint) uint32 { 320 return uint32(((scalar[bit>>3]) >> (bit & 7)) & 1) 321 } 322 323 // sm2P256ScalarBaseMult sets {xOut,yOut,zOut} = scalar*G where scalar is a 324 // little-endian number. Note that the value of scalar must be less than the 325 // order of the group. 326 func sm2P256ScalarBaseMult(xOut, yOut, zOut *sm2P256FieldElement, scalar *[32]uint8) { 327 nIsInfinityMask := ^uint32(0) 328 var px, py, tx, ty, tz sm2P256FieldElement 329 var pIsNoninfiniteMask, mask, tableOffset uint32 330 331 for i := range xOut { 332 xOut[i] = 0 333 } 334 for i := range yOut { 335 yOut[i] = 0 336 } 337 for i := range zOut { 338 zOut[i] = 0 339 } 340 341 // The loop adds bits at positions 0, 64, 128 and 192, followed by 342 // positions 32,96,160 and 224 and does this 32 times. 343 for i := uint(0); i < 32; i++ { 344 if i != 0 { 345 sm2P256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut) 346 } 347 tableOffset = 0 348 for j := uint(0); j <= 32; j += 32 { 349 bit0 := sm2P256GetBit(scalar, 31-i+j) 350 bit1 := sm2P256GetBit(scalar, 95-i+j) 351 bit2 := sm2P256GetBit(scalar, 159-i+j) 352 bit3 := sm2P256GetBit(scalar, 223-i+j) 353 index := bit0 | (bit1 << 1) | (bit2 << 2) | (bit3 << 3) 354 355 sm2P256SelectAffinePoint(&px, &py, sm2P256Precomputed[tableOffset:], index) 356 tableOffset += 30 * 9 357 358 // Since scalar is less than the order of the group, we know that 359 // {xOut,yOut,zOut} != {px,py,1}, unless both are zero, which we handle 360 // below. 
361 sm2P256PointAddMixed(&tx, &ty, &tz, xOut, yOut, zOut, &px, &py) 362 // The result of pointAddMixed is incorrect if {xOut,yOut,zOut} is zero 363 // (a.k.a. the point at infinity). We handle that situation by 364 // copying the point from the table. 365 sm2P256CopyConditional(xOut, &px, nIsInfinityMask) 366 sm2P256CopyConditional(yOut, &py, nIsInfinityMask) 367 sm2P256CopyConditional(zOut, &sm2P256Factor[1], nIsInfinityMask) 368 369 // Equally, the result is also wrong if the point from the table is 370 // zero, which happens when the index is zero. We handle that by 371 // only copying from {tx,ty,tz} to {xOut,yOut,zOut} if index != 0. 372 pIsNoninfiniteMask = nonZeroToAllOnes(index) 373 mask = pIsNoninfiniteMask & ^nIsInfinityMask 374 sm2P256CopyConditional(xOut, &tx, mask) 375 sm2P256CopyConditional(yOut, &ty, mask) 376 sm2P256CopyConditional(zOut, &tz, mask) 377 // If p was not zero, then n is now non-zero. 378 nIsInfinityMask &^= pIsNoninfiniteMask 379 } 380 } 381 } 382 383 func sm2P256PointToAffine(xOut, yOut, x, y, z *sm2P256FieldElement) { 384 var zInv, zInvSq sm2P256FieldElement 385 386 zz := sm2P256ToBig(z) 387 zz.ModInverse(zz, sm2P256.P) 388 sm2P256FromBig(&zInv, zz) 389 390 sm2P256Square(&zInvSq, &zInv) 391 sm2P256Mul(xOut, x, &zInvSq) 392 sm2P256Mul(&zInv, &zInv, &zInvSq) 393 sm2P256Mul(yOut, y, &zInv) 394 } 395 396 func sm2P256ToAffine(x, y, z *sm2P256FieldElement) (xOut, yOut *big.Int) { 397 var xx, yy sm2P256FieldElement 398 399 sm2P256PointToAffine(&xx, &yy, x, y, z) 400 return sm2P256ToBig(&xx), sm2P256ToBig(&yy) 401 } 402 403 var sm2P256Factor = []sm2P256FieldElement{ 404 sm2P256FieldElement{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 405 sm2P256FieldElement{0x2, 0x0, 0x1FFFFF00, 0x7FF, 0x0, 0x0, 0x0, 0x2000000, 0x0}, 406 sm2P256FieldElement{0x4, 0x0, 0x1FFFFE00, 0xFFF, 0x0, 0x0, 0x0, 0x4000000, 0x0}, 407 sm2P256FieldElement{0x6, 0x0, 0x1FFFFD00, 0x17FF, 0x0, 0x0, 0x0, 0x6000000, 0x0}, 408 sm2P256FieldElement{0x8, 0x0, 0x1FFFFC00, 0x1FFF, 
0x0, 0x0, 0x0, 0x8000000, 0x0}, 409 sm2P256FieldElement{0xA, 0x0, 0x1FFFFB00, 0x27FF, 0x0, 0x0, 0x0, 0xA000000, 0x0}, 410 sm2P256FieldElement{0xC, 0x0, 0x1FFFFA00, 0x2FFF, 0x0, 0x0, 0x0, 0xC000000, 0x0}, 411 sm2P256FieldElement{0xE, 0x0, 0x1FFFF900, 0x37FF, 0x0, 0x0, 0x0, 0xE000000, 0x0}, 412 sm2P256FieldElement{0x10, 0x0, 0x1FFFF800, 0x3FFF, 0x0, 0x0, 0x0, 0x0, 0x01}, 413 } 414 415 func sm2P256Scalar(b *sm2P256FieldElement, a int) { 416 sm2P256Mul(b, b, &sm2P256Factor[a]) 417 } 418 419 // (x3, y3, z3) = (x1, y1, z1) + (x2, y2, z2) 420 func sm2P256PointAdd(x1, y1, z1, x2, y2, z2, x3, y3, z3 *sm2P256FieldElement) { 421 var u1, u2, z22, z12, z23, z13, s1, s2, h, h2, r, r2, tm sm2P256FieldElement 422 423 if sm2P256ToBig(z1).Sign() == 0 { 424 sm2P256Dup(x3, x2) 425 sm2P256Dup(y3, y2) 426 sm2P256Dup(z3, z2) 427 return 428 } 429 430 if sm2P256ToBig(z2).Sign() == 0 { 431 sm2P256Dup(x3, x1) 432 sm2P256Dup(y3, y1) 433 sm2P256Dup(z3, z1) 434 return 435 } 436 437 sm2P256Square(&z12, z1) // z12 = z1 ^ 2 438 sm2P256Square(&z22, z2) // z22 = z2 ^ 2 439 440 sm2P256Mul(&z13, &z12, z1) // z13 = z1 ^ 3 441 sm2P256Mul(&z23, &z22, z2) // z23 = z2 ^ 3 442 443 sm2P256Mul(&u1, x1, &z22) // u1 = x1 * z2 ^ 2 444 sm2P256Mul(&u2, x2, &z12) // u2 = x2 * z1 ^ 2 445 446 sm2P256Mul(&s1, y1, &z23) // s1 = y1 * z2 ^ 3 447 sm2P256Mul(&s2, y2, &z13) // s2 = y2 * z1 ^ 3 448 449 if sm2P256ToBig(&u1).Cmp(sm2P256ToBig(&u2)) == 0 && 450 sm2P256ToBig(&s1).Cmp(sm2P256ToBig(&s2)) == 0 { 451 sm2P256PointDouble(x1, y1, z1, x1, y1, z1) 452 } 453 454 sm2P256Sub(&h, &u2, &u1) // h = u2 - u1 455 sm2P256Sub(&r, &s2, &s1) // r = s2 - s1 456 457 sm2P256Square(&r2, &r) // r2 = r ^ 2 458 sm2P256Square(&h2, &h) // h2 = h ^ 2 459 460 sm2P256Mul(&tm, &h2, &h) // tm = h ^ 3 461 sm2P256Sub(x3, &r2, &tm) 462 sm2P256Mul(&tm, &u1, &h2) 463 sm2P256Scalar(&tm, 2) // tm = 2 * (u1 * h ^ 2) 464 sm2P256Sub(x3, x3, &tm) // x3 = r ^ 2 - h ^ 3 - 2 * u1 * h ^ 2 465 466 sm2P256Mul(&tm, &u1, &h2) // tm = u1 * h ^ 2 467 sm2P256Sub(&tm, 
&tm, x3) // tm = u1 * h ^ 2 - x3 468 sm2P256Mul(y3, &r, &tm) 469 sm2P256Mul(&tm, &h2, &h) // tm = h ^ 3 470 sm2P256Mul(&tm, &tm, &s1) // tm = s1 * h ^ 3 471 sm2P256Sub(y3, y3, &tm) // y3 = r * (u1 * h ^ 2 - x3) - s1 * h ^ 3 472 473 sm2P256Mul(z3, z1, z2) 474 sm2P256Mul(z3, z3, &h) // z3 = z1 * z3 * h 475 } 476 477 // (x3, y3, z3) = (x1, y1, z1)- (x2, y2, z2) 478 func sm2P256PointSub(x1, y1, z1, x2, y2, z2, x3, y3, z3 *sm2P256FieldElement) { 479 var u1, u2, z22, z12, z23, z13, s1, s2, h, h2, r, r2, tm sm2P256FieldElement 480 y := sm2P256ToBig(y2) 481 zero := new(big.Int).SetInt64(0) 482 y.Sub(zero, y) 483 sm2P256FromBig(y2, y) 484 485 if sm2P256ToBig(z1).Sign() == 0 { 486 sm2P256Dup(x3, x2) 487 sm2P256Dup(y3, y2) 488 sm2P256Dup(z3, z2) 489 return 490 } 491 492 if sm2P256ToBig(z2).Sign() == 0 { 493 sm2P256Dup(x3, x1) 494 sm2P256Dup(y3, y1) 495 sm2P256Dup(z3, z1) 496 return 497 } 498 499 sm2P256Square(&z12, z1) // z12 = z1 ^ 2 500 sm2P256Square(&z22, z2) // z22 = z2 ^ 2 501 502 sm2P256Mul(&z13, &z12, z1) // z13 = z1 ^ 3 503 sm2P256Mul(&z23, &z22, z2) // z23 = z2 ^ 3 504 505 sm2P256Mul(&u1, x1, &z22) // u1 = x1 * z2 ^ 2 506 sm2P256Mul(&u2, x2, &z12) // u2 = x2 * z1 ^ 2 507 508 sm2P256Mul(&s1, y1, &z23) // s1 = y1 * z2 ^ 3 509 sm2P256Mul(&s2, y2, &z13) // s2 = y2 * z1 ^ 3 510 511 if sm2P256ToBig(&u1).Cmp(sm2P256ToBig(&u2)) == 0 && 512 sm2P256ToBig(&s1).Cmp(sm2P256ToBig(&s2)) == 0 { 513 sm2P256PointDouble(x1, y1, z1, x1, y1, z1) 514 } 515 516 sm2P256Sub(&h, &u2, &u1) // h = u2 - u1 517 sm2P256Sub(&r, &s2, &s1) // r = s2 - s1 518 519 sm2P256Square(&r2, &r) // r2 = r ^ 2 520 sm2P256Square(&h2, &h) // h2 = h ^ 2 521 522 sm2P256Mul(&tm, &h2, &h) // tm = h ^ 3 523 sm2P256Sub(x3, &r2, &tm) 524 sm2P256Mul(&tm, &u1, &h2) 525 sm2P256Scalar(&tm, 2) // tm = 2 * (u1 * h ^ 2) 526 sm2P256Sub(x3, x3, &tm) // x3 = r ^ 2 - h ^ 3 - 2 * u1 * h ^ 2 527 528 sm2P256Mul(&tm, &u1, &h2) // tm = u1 * h ^ 2 529 sm2P256Sub(&tm, &tm, x3) // tm = u1 * h ^ 2 - x3 530 sm2P256Mul(y3, &r, &tm) 531 
sm2P256Mul(&tm, &h2, &h) // tm = h ^ 3 532 sm2P256Mul(&tm, &tm, &s1) // tm = s1 * h ^ 3 533 sm2P256Sub(y3, y3, &tm) // y3 = r * (u1 * h ^ 2 - x3) - s1 * h ^ 3 534 535 sm2P256Mul(z3, z1, z2) 536 sm2P256Mul(z3, z3, &h) // z3 = z1 * z3 * h 537 } 538 539 func sm2P256PointDouble(x3, y3, z3, x, y, z *sm2P256FieldElement) { 540 var s, m, m2, x2, y2, z2, z4, y4, az4 sm2P256FieldElement 541 542 sm2P256Square(&x2, x) // x2 = x ^ 2 543 sm2P256Square(&y2, y) // y2 = y ^ 2 544 sm2P256Square(&z2, z) // z2 = z ^ 2 545 546 sm2P256Square(&z4, z) // z4 = z ^ 2 547 sm2P256Mul(&z4, &z4, z) // z4 = z ^ 3 548 sm2P256Mul(&z4, &z4, z) // z4 = z ^ 4 549 550 sm2P256Square(&y4, y) // y4 = y ^ 2 551 sm2P256Mul(&y4, &y4, y) // y4 = y ^ 3 552 sm2P256Mul(&y4, &y4, y) // y4 = y ^ 4 553 sm2P256Scalar(&y4, 8) // y4 = 8 * y ^ 4 554 555 sm2P256Mul(&s, x, &y2) 556 sm2P256Scalar(&s, 4) // s = 4 * x * y ^ 2 557 558 sm2P256Dup(&m, &x2) 559 sm2P256Scalar(&m, 3) 560 sm2P256Mul(&az4, &sm2P256.a, &z4) 561 sm2P256Add(&m, &m, &az4) // m = 3 * x ^ 2 + a * z ^ 4 562 563 sm2P256Square(&m2, &m) // m2 = m ^ 2 564 565 sm2P256Add(z3, y, z) 566 sm2P256Square(z3, z3) 567 sm2P256Sub(z3, z3, &z2) 568 sm2P256Sub(z3, z3, &y2) // z' = (y + z) ^2 - z ^ 2 - y ^ 2 569 570 sm2P256Sub(x3, &m2, &s) 571 sm2P256Sub(x3, x3, &s) // x' = m2 - 2 * s 572 573 sm2P256Sub(y3, &s, x3) 574 sm2P256Mul(y3, y3, &m) 575 sm2P256Sub(y3, y3, &y4) // y' = m * (s - x') - 8 * y ^ 4 576 } 577 578 // p256Zero31 is 0 mod p. 
var sm2P256Zero31 = sm2P256FieldElement{0x7FFFFFF8, 0x3FFFFFFC, 0x800003FC, 0x3FFFDFFC, 0x7FFFFFFC, 0x3FFFFFFC, 0x7FFFFFFC, 0x37FFFFFC, 0x7FFFFFFC}

// c = a + b
// Limbs alternate between 29 and 28 bits wide; the loop body handles one
// 29-bit limb, then (after i++) one 28-bit limb, propagating the carry.
func sm2P256Add(c, a, b *sm2P256FieldElement) {
	carry := uint32(0)
	for i := 0; ; i++ {
		c[i] = a[i] + b[i]
		c[i] += carry
		carry = c[i] >> 29
		c[i] &= bottom29Bits
		i++
		if i == 9 {
			break
		}
		c[i] = a[i] + b[i]
		c[i] += carry
		carry = c[i] >> 28
		c[i] &= bottom28Bits
	}
	// The final carry (< 2^3) is folded back in mod p.
	sm2P256ReduceCarry(c, carry)
}

// c = a - b
// sm2P256Zero31 (a representation of 0 mod p) is added to each limb so the
// per-limb differences stay non-negative in uint32 arithmetic.
func sm2P256Sub(c, a, b *sm2P256FieldElement) {
	var carry uint32

	for i := 0; ; i++ {
		c[i] = a[i] - b[i]
		c[i] += sm2P256Zero31[i]
		c[i] += carry
		carry = c[i] >> 29
		c[i] &= bottom29Bits
		i++
		if i == 9 {
			break
		}
		c[i] = a[i] - b[i]
		c[i] += sm2P256Zero31[i]
		c[i] += carry
		carry = c[i] >> 28
		c[i] &= bottom28Bits
	}
	sm2P256ReduceCarry(c, carry)
}

// c = a * b
// Schoolbook multiplication into a 17-limb unreduced product. The <<1 factors
// on odd-limb cross terms account for the alternating 29/28-bit limb widths;
// reduction back to 9 limbs happens in sm2P256ReduceDegree.
func sm2P256Mul(c, a, b *sm2P256FieldElement) {
	var tmp sm2P256LargeFieldElement

	tmp[0] = uint64(a[0]) * uint64(b[0])
	tmp[1] = uint64(a[0])*(uint64(b[1])<<0) +
		uint64(a[1])*(uint64(b[0])<<0)
	tmp[2] = uint64(a[0])*(uint64(b[2])<<0) +
		uint64(a[1])*(uint64(b[1])<<1) +
		uint64(a[2])*(uint64(b[0])<<0)
	tmp[3] = uint64(a[0])*(uint64(b[3])<<0) +
		uint64(a[1])*(uint64(b[2])<<0) +
		uint64(a[2])*(uint64(b[1])<<0) +
		uint64(a[3])*(uint64(b[0])<<0)
	tmp[4] = uint64(a[0])*(uint64(b[4])<<0) +
		uint64(a[1])*(uint64(b[3])<<1) +
		uint64(a[2])*(uint64(b[2])<<0) +
		uint64(a[3])*(uint64(b[1])<<1) +
		uint64(a[4])*(uint64(b[0])<<0)
	tmp[5] = uint64(a[0])*(uint64(b[5])<<0) +
		uint64(a[1])*(uint64(b[4])<<0) +
		uint64(a[2])*(uint64(b[3])<<0) +
		uint64(a[3])*(uint64(b[2])<<0) +
		uint64(a[4])*(uint64(b[1])<<0) +
		uint64(a[5])*(uint64(b[0])<<0)
	tmp[6] = uint64(a[0])*(uint64(b[6])<<0) +
		uint64(a[1])*(uint64(b[5])<<1) +
		uint64(a[2])*(uint64(b[4])<<0) +
		uint64(a[3])*(uint64(b[3])<<1) +
		uint64(a[4])*(uint64(b[2])<<0) +
		uint64(a[5])*(uint64(b[1])<<1) +
		uint64(a[6])*(uint64(b[0])<<0)
	tmp[7] = uint64(a[0])*(uint64(b[7])<<0) +
		uint64(a[1])*(uint64(b[6])<<0) +
		uint64(a[2])*(uint64(b[5])<<0) +
		uint64(a[3])*(uint64(b[4])<<0) +
		uint64(a[4])*(uint64(b[3])<<0) +
		uint64(a[5])*(uint64(b[2])<<0) +
		uint64(a[6])*(uint64(b[1])<<0) +
		uint64(a[7])*(uint64(b[0])<<0)
	// tmp[8] has the greatest value but doesn't overflow. See logic in
	// p256Square.
	tmp[8] = uint64(a[0])*(uint64(b[8])<<0) +
		uint64(a[1])*(uint64(b[7])<<1) +
		uint64(a[2])*(uint64(b[6])<<0) +
		uint64(a[3])*(uint64(b[5])<<1) +
		uint64(a[4])*(uint64(b[4])<<0) +
		uint64(a[5])*(uint64(b[3])<<1) +
		uint64(a[6])*(uint64(b[2])<<0) +
		uint64(a[7])*(uint64(b[1])<<1) +
		uint64(a[8])*(uint64(b[0])<<0)
	tmp[9] = uint64(a[1])*(uint64(b[8])<<0) +
		uint64(a[2])*(uint64(b[7])<<0) +
		uint64(a[3])*(uint64(b[6])<<0) +
		uint64(a[4])*(uint64(b[5])<<0) +
		uint64(a[5])*(uint64(b[4])<<0) +
		uint64(a[6])*(uint64(b[3])<<0) +
		uint64(a[7])*(uint64(b[2])<<0) +
		uint64(a[8])*(uint64(b[1])<<0)
	tmp[10] = uint64(a[2])*(uint64(b[8])<<0) +
		uint64(a[3])*(uint64(b[7])<<1) +
		uint64(a[4])*(uint64(b[6])<<0) +
		uint64(a[5])*(uint64(b[5])<<1) +
		uint64(a[6])*(uint64(b[4])<<0) +
		uint64(a[7])*(uint64(b[3])<<1) +
		uint64(a[8])*(uint64(b[2])<<0)
	tmp[11] = uint64(a[3])*(uint64(b[8])<<0) +
		uint64(a[4])*(uint64(b[7])<<0) +
		uint64(a[5])*(uint64(b[6])<<0) +
		uint64(a[6])*(uint64(b[5])<<0) +
		uint64(a[7])*(uint64(b[4])<<0) +
		uint64(a[8])*(uint64(b[3])<<0)
	tmp[12] = uint64(a[4])*(uint64(b[8])<<0) +
		uint64(a[5])*(uint64(b[7])<<1) +
		uint64(a[6])*(uint64(b[6])<<0) +
		uint64(a[7])*(uint64(b[5])<<1) +
		uint64(a[8])*(uint64(b[4])<<0)
	tmp[13] = uint64(a[5])*(uint64(b[8])<<0) +
		uint64(a[6])*(uint64(b[7])<<0) +
		uint64(a[7])*(uint64(b[6])<<0) +
		uint64(a[8])*(uint64(b[5])<<0)
	tmp[14] = uint64(a[6])*(uint64(b[8])<<0) +
		uint64(a[7])*(uint64(b[7])<<1) +
		uint64(a[8])*(uint64(b[6])<<0)
	tmp[15] = uint64(a[7])*(uint64(b[8])<<0) +
		uint64(a[8])*(uint64(b[7])<<0)
	tmp[16] = uint64(a[8]) * (uint64(b[8]) << 0)
	sm2P256ReduceDegree(c, &tmp)
}

// b = a * a
// Like sm2P256Mul but exploits symmetry: each cross term a[i]*a[j] (i != j)
// appears once with an extra <<1 instead of twice.
func sm2P256Square(b, a *sm2P256FieldElement) {
	var tmp sm2P256LargeFieldElement

	tmp[0] = uint64(a[0]) * uint64(a[0])
	tmp[1] = uint64(a[0]) * (uint64(a[1]) << 1)
	tmp[2] = uint64(a[0])*(uint64(a[2])<<1) +
		uint64(a[1])*(uint64(a[1])<<1)
	tmp[3] = uint64(a[0])*(uint64(a[3])<<1) +
		uint64(a[1])*(uint64(a[2])<<1)
	tmp[4] = uint64(a[0])*(uint64(a[4])<<1) +
		uint64(a[1])*(uint64(a[3])<<2) +
		uint64(a[2])*uint64(a[2])
	tmp[5] = uint64(a[0])*(uint64(a[5])<<1) +
		uint64(a[1])*(uint64(a[4])<<1) +
		uint64(a[2])*(uint64(a[3])<<1)
	tmp[6] = uint64(a[0])*(uint64(a[6])<<1) +
		uint64(a[1])*(uint64(a[5])<<2) +
		uint64(a[2])*(uint64(a[4])<<1) +
		uint64(a[3])*(uint64(a[3])<<1)
	tmp[7] = uint64(a[0])*(uint64(a[7])<<1) +
		uint64(a[1])*(uint64(a[6])<<1) +
		uint64(a[2])*(uint64(a[5])<<1) +
		uint64(a[3])*(uint64(a[4])<<1)
	// tmp[8] has the greatest value of 2**61 + 2**60 + 2**61 + 2**60 + 2**60,
	// which is < 2**64 as required.
	tmp[8] = uint64(a[0])*(uint64(a[8])<<1) +
		uint64(a[1])*(uint64(a[7])<<2) +
		uint64(a[2])*(uint64(a[6])<<1) +
		uint64(a[3])*(uint64(a[5])<<2) +
		uint64(a[4])*uint64(a[4])
	tmp[9] = uint64(a[1])*(uint64(a[8])<<1) +
		uint64(a[2])*(uint64(a[7])<<1) +
		uint64(a[3])*(uint64(a[6])<<1) +
		uint64(a[4])*(uint64(a[5])<<1)
	tmp[10] = uint64(a[2])*(uint64(a[8])<<1) +
		uint64(a[3])*(uint64(a[7])<<2) +
		uint64(a[4])*(uint64(a[6])<<1) +
		uint64(a[5])*(uint64(a[5])<<1)
	tmp[11] = uint64(a[3])*(uint64(a[8])<<1) +
		uint64(a[4])*(uint64(a[7])<<1) +
		uint64(a[5])*(uint64(a[6])<<1)
	tmp[12] = uint64(a[4])*(uint64(a[8])<<1) +
		uint64(a[5])*(uint64(a[7])<<2) +
		uint64(a[6])*uint64(a[6])
	tmp[13] = uint64(a[5])*(uint64(a[8])<<1) +
		uint64(a[6])*(uint64(a[7])<<1)
	tmp[14] = uint64(a[6])*(uint64(a[8])<<1) +
		uint64(a[7])*(uint64(a[7])<<1)
	tmp[15] = uint64(a[7]) * (uint64(a[8]) << 1)
	tmp[16] = uint64(a[8]) * uint64(a[8])
	sm2P256ReduceDegree(b, &tmp)
}

// nonZeroToAllOnes returns:
//	0xffffffff for 0 < x <= 2**31
//	0 for x == 0 or x > 2**31.
func nonZeroToAllOnes(x uint32) uint32 {
	// (x-1)>>31 is 1 iff x == 0 (for x <= 2^31); subtracting 1 turns that
	// into the 0 / 0xffffffff mask, branch-free.
	return ((x - 1) >> 31) - 1
}

// sm2P256Carry[i*9 ... i*9+8] is the field element i*(2^257) mod p, used by
// sm2P256ReduceCarry to fold a small top carry back into the 9-limb value.
var sm2P256Carry = [8 * 9]uint32{
	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
	0x2, 0x0, 0x1FFFFF00, 0x7FF, 0x0, 0x0, 0x0, 0x2000000, 0x0,
	0x4, 0x0, 0x1FFFFE00, 0xFFF, 0x0, 0x0, 0x0, 0x4000000, 0x0,
	0x6, 0x0, 0x1FFFFD00, 0x17FF, 0x0, 0x0, 0x0, 0x6000000, 0x0,
	0x8, 0x0, 0x1FFFFC00, 0x1FFF, 0x0, 0x0, 0x0, 0x8000000, 0x0,
	0xA, 0x0, 0x1FFFFB00, 0x27FF, 0x0, 0x0, 0x0, 0xA000000, 0x0,
	0xC, 0x0, 0x1FFFFA00, 0x2FFF, 0x0, 0x0, 0x0, 0xC000000, 0x0,
	0xE, 0x0, 0x1FFFF900, 0x37FF, 0x0, 0x0, 0x0, 0xE000000, 0x0,
}

// carry < 2 ^ 3
// Only limbs 0, 2, 3 and 7 of the carry table are ever non-zero.
func sm2P256ReduceCarry(a *sm2P256FieldElement, carry uint32) {
	a[0] += sm2P256Carry[carry*9+0]
	a[2] += sm2P256Carry[carry*9+2]
	a[3] += sm2P256Carry[carry*9+3]
	a[7] += sm2P256Carry[carry*9+7]
}

// sm2P256ReduceDegree reduces the 17-limb unreduced product b back to the
// 9-limb representation a, mod p. First the 64-bit limbs are repacked into
// 18 alternating 29/28-bit limbs (tmp), then the high limbs are eliminated
// one at a time using the sparse form of p, with explicit borrow handling.
func sm2P256ReduceDegree(a *sm2P256FieldElement, b *sm2P256LargeFieldElement) {
	var tmp [18]uint32
	var carry, x, xMask uint32

	// tmp
	// 0  | 1  | 2  | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10 ...
	// 29 | 28 | 29 | 28 | 29 | 28 | 29 | 28 | 29 | 28 | 29 ...
	tmp[0] = uint32(b[0]) & bottom29Bits
	tmp[1] = uint32(b[0]) >> 29
	tmp[1] |= (uint32(b[0]>>32) << 3) & bottom28Bits
	tmp[1] += uint32(b[1]) & bottom28Bits
	carry = tmp[1] >> 28
	tmp[1] &= bottom28Bits
	for i := 2; i < 17; i++ {
		// Even-indexed (29-bit) limb.
		tmp[i] = (uint32(b[i-2] >> 32)) >> 25
		tmp[i] += (uint32(b[i-1])) >> 28
		tmp[i] += (uint32(b[i-1]>>32) << 4) & bottom29Bits
		tmp[i] += uint32(b[i]) & bottom29Bits
		tmp[i] += carry
		carry = tmp[i] >> 29
		tmp[i] &= bottom29Bits

		i++
		if i == 17 {
			break
		}
		// Odd-indexed (28-bit) limb.
		tmp[i] = uint32(b[i-2]>>32) >> 25
		tmp[i] += uint32(b[i-1]) >> 29
		tmp[i] += ((uint32(b[i-1] >> 32)) << 3) & bottom28Bits
		tmp[i] += uint32(b[i]) & bottom28Bits
		tmp[i] += carry
		carry = tmp[i] >> 28
		tmp[i] &= bottom28Bits
	}
	tmp[17] = uint32(b[15]>>32) >> 25
	tmp[17] += uint32(b[16]) >> 29
	tmp[17] += uint32(b[16]>>32) << 3
	tmp[17] += carry

	// Eliminate tmp[0]..tmp[8] from the bottom, substituting each low limb
	// with its equivalent contribution to the higher limbs mod p. The
	// set4/set5/set7/set8/set9 flags record whether a preceding subtraction
	// already pre-borrowed from the next limb. xMask masks out all
	// adjustments when x == 0.
	for i := 0; ; i += 2 {

		tmp[i+1] += tmp[i] >> 29
		x = tmp[i] & bottom29Bits
		tmp[i] = 0
		if x > 0 {
			set4 := uint32(0)
			set7 := uint32(0)
			xMask = nonZeroToAllOnes(x)
			tmp[i+2] += (x << 7) & bottom29Bits
			tmp[i+3] += x >> 22
			if tmp[i+3] < 0x10000000 {
				set4 = 1
				tmp[i+3] += 0x10000000 & xMask
				tmp[i+3] -= (x << 10) & bottom28Bits
			} else {
				tmp[i+3] -= (x << 10) & bottom28Bits
			}
			if tmp[i+4] < 0x20000000 {
				tmp[i+4] += 0x20000000 & xMask
				tmp[i+4] -= set4 // borrow
				tmp[i+4] -= x >> 18
				if tmp[i+5] < 0x10000000 {
					tmp[i+5] += 0x10000000 & xMask
					tmp[i+5] -= 1 // borrow
					if tmp[i+6] < 0x20000000 {
						set7 = 1
						tmp[i+6] += 0x20000000 & xMask
						tmp[i+6] -= 1 // borrow
					} else {
						tmp[i+6] -= 1 // borrow
					}
				} else {
					tmp[i+5] -= 1
				}
			} else {
				tmp[i+4] -= set4 // borrow
				tmp[i+4] -= x >> 18
			}
			if tmp[i+7] < 0x10000000 {
				tmp[i+7] += 0x10000000 & xMask
				tmp[i+7] -= set7
				tmp[i+7] -= (x << 24) & bottom28Bits
				tmp[i+8] += (x << 28) & bottom29Bits
				if tmp[i+8] < 0x20000000 {
					tmp[i+8] += 0x20000000 & xMask
					tmp[i+8] -= 1
					tmp[i+8] -= x >> 4
					tmp[i+9] += ((x >> 1) - 1) & xMask
				} else {
					tmp[i+8] -= 1
					tmp[i+8] -= x >> 4
					tmp[i+9] += (x >> 1) & xMask
				}
			} else {
				tmp[i+7] -= set7 // borrow
				tmp[i+7] -= (x << 24) & bottom28Bits
				tmp[i+8] += (x << 28) & bottom29Bits
				if tmp[i+8] < 0x20000000 {
					tmp[i+8] += 0x20000000 & xMask
					tmp[i+8] -= x >> 4
					tmp[i+9] += ((x >> 1) - 1) & xMask
				} else {
					tmp[i+8] -= x >> 4
					tmp[i+9] += (x >> 1) & xMask
				}
			}

		}

		if i+1 == 9 {
			break
		}

		// Same elimination for the following odd (28-bit) limb.
		tmp[i+2] += tmp[i+1] >> 28
		x = tmp[i+1] & bottom28Bits
		tmp[i+1] = 0
		if x > 0 {
			set5 := uint32(0)
			set8 := uint32(0)
			set9 := uint32(0)
			xMask = nonZeroToAllOnes(x)
			tmp[i+3] += (x << 7) & bottom28Bits
			tmp[i+4] += x >> 21
			if tmp[i+4] < 0x20000000 {
				set5 = 1
				tmp[i+4] += 0x20000000 & xMask
				tmp[i+4] -= (x << 11) & bottom29Bits
			} else {
				tmp[i+4] -= (x << 11) & bottom29Bits
			}
			if tmp[i+5] < 0x10000000 {
				tmp[i+5] += 0x10000000 & xMask
				tmp[i+5] -= set5 // borrow
				tmp[i+5] -= x >> 18
				if tmp[i+6] < 0x20000000 {
					tmp[i+6] += 0x20000000 & xMask
					tmp[i+6] -= 1 // borrow
					if tmp[i+7] < 0x10000000 {
						set8 = 1
						tmp[i+7] += 0x10000000 & xMask
						tmp[i+7] -= 1 // borrow
					} else {
						tmp[i+7] -= 1 // borrow
					}
				} else {
					tmp[i+6] -= 1 // borrow
				}
			} else {
				tmp[i+5] -= set5 // borrow
				tmp[i+5] -= x >> 18
			}
			if tmp[i+8] < 0x20000000 {
				set9 = 1
				tmp[i+8] += 0x20000000 & xMask
				tmp[i+8] -= set8
				tmp[i+8] -= (x << 25) & bottom29Bits
			} else {
				tmp[i+8] -= set8
				tmp[i+8] -= (x << 25) & bottom29Bits
			}
			if tmp[i+9] < 0x10000000 {
				tmp[i+9] += 0x10000000 & xMask
				tmp[i+9] -= set9 // borrow
				tmp[i+9] -= x >> 4
				tmp[i+10] += (x - 1) & xMask
			} else {
				tmp[i+9] -= set9 // borrow
				tmp[i+9] -= x >> 4
				tmp[i+10] += x & xMask
			}
		}
	}

	// The result now lives in tmp[9..17]; renormalize into the 9-limb output
	// with a final carry pass.
	carry = uint32(0)
	for i := 0; i < 8; i++ {
		a[i] = tmp[i+9]
		a[i] += carry
		a[i] += (tmp[i+10] << 28) & bottom29Bits
		carry = a[i] >> 29
		a[i] &= bottom29Bits

		i++
		a[i] = tmp[i+9] >> 1
		a[i] += carry
		carry = a[i] >> 28
		a[i] &= bottom28Bits
	}
	a[8] = tmp[17]
	a[8] += carry
	carry = a[8] >> 29
	a[8] &= bottom29Bits
	sm2P256ReduceCarry(a, carry)
}

// b = a
func sm2P256Dup(b, a *sm2P256FieldElement) {
	*b = *a
}

// X = a * R mod P
// Converts a big.Int into the scaled 9-limb representation (R = 2^257),
// slicing off alternating 29- and 28-bit limbs.
func sm2P256FromBig(X *sm2P256FieldElement, a *big.Int) {
	x := new(big.Int).Lsh(a, 257)
	x.Mod(x, sm2P256.P)
	for i := 0; i < 9; i++ {
		if bits := x.Bits(); len(bits) > 0 {
			X[i] = uint32(bits[0]) & bottom29Bits
		} else {
			X[i] = 0
		}
		x.Rsh(x, 29)
		i++
		if i == 9 {
			break
		}
		if bits := x.Bits(); len(bits) > 0 {
			X[i] = uint32(bits[0]) & bottom28Bits
		} else {
			X[i] = 0
		}
		x.Rsh(x, 28)
	}
}

// X = r * R mod P
// r = X * R' mod P
// Reassembles the limbs from the top down, then multiplies by RInverse to
// undo the R scaling.
func sm2P256ToBig(X *sm2P256FieldElement) *big.Int {
	r, tm := new(big.Int), new(big.Int)
	r.SetInt64(int64(X[8]))
	for i := 7; i >= 0; i-- {
		if (i & 1) == 0 {
			r.Lsh(r, 29)
		} else {
			r.Lsh(r, 28)
		}
		tm.SetInt64(int64(X[i]))
		r.Add(r, tm)
	}
	r.Mul(r, sm2P256.RInverse)
	r.Mod(r, sm2P256.P)
	return r
}

// WNafReversed returns wnaf in reverse (most-significant-digit-first) order.
func WNafReversed(wnaf []int8) []int8 {
	wnafRev := make([]int8, len(wnaf), len(wnaf))
	for i, v := range wnaf {
		wnafRev[len(wnaf)-(1+i)] = v
	}
	return wnafRev
}

// sm2GenrateWNaf recodes b (reduced mod N) into width-4 NAF digits.
// NOTE(review): the if/else below assigns k = n on both branches; only the
// Mod differs. Also, the remainder of this function is truncated in the
// source available for review.
func sm2GenrateWNaf(b []byte) []int8 {
	n := new(big.Int).SetBytes(b)
	var k *big.Int
	if n.Cmp(sm2P256.N) >= 0 {
		n.Mod(n, sm2P256.N)
		k = n
	} else {
		k = n
	}
	wnaf := make([]int8, k.BitLen()+1, k.BitLen()+1)
	if k.Sign() == 0 {
		return wnaf
	}
	var width, pow2, sign int
	width, pow2, sign = 4,
16, 8 1057 var mask int64 = 15 1058 var carry bool 1059 var length, pos int 1060 for pos <= k.BitLen() { 1061 if k.Bit(pos) == boolToUint(carry) { 1062 pos++ 1063 continue 1064 } 1065 k.Rsh(k, uint(pos)) 1066 var digit int 1067 digit = int(k.Int64() & mask) 1068 if carry { 1069 digit++ 1070 } 1071 carry = (digit & sign) != 0 1072 if carry { 1073 digit -= pow2 1074 } 1075 length += pos 1076 wnaf[length] = int8(digit) 1077 pos = int(width) 1078 } 1079 if len(wnaf) > length+1 { 1080 t := make([]int8, length+1, length+1) 1081 copy(t, wnaf[0:length+1]) 1082 wnaf = t 1083 } 1084 return wnaf 1085 } 1086 func boolToUint(b bool) uint { 1087 if b { 1088 return 1 1089 } 1090 return 0 1091 } 1092 func abs(a int8) uint32 { 1093 if a < 0 { 1094 return uint32(-a) 1095 } 1096 return uint32(a) 1097 } 1098 1099 func sm2P256ScalarMult(xOut, yOut, zOut, x, y *sm2P256FieldElement, scalar []int8) { 1100 var precomp [16][3]sm2P256FieldElement 1101 var px, py, pz, tx, ty, tz sm2P256FieldElement 1102 var nIsInfinityMask, index, pIsNoninfiniteMask, mask uint32 1103 1104 // We precompute 0,1,2,... times {x,y}. 
1105 precomp[1][0] = *x 1106 precomp[1][1] = *y 1107 precomp[1][2] = sm2P256Factor[1] 1108 1109 for i := 2; i < 8; i += 2 { 1110 sm2P256PointDouble(&precomp[i][0], &precomp[i][1], &precomp[i][2], &precomp[i/2][0], &precomp[i/2][1], &precomp[i/2][2]) 1111 sm2P256PointAddMixed(&precomp[i+1][0], &precomp[i+1][1], &precomp[i+1][2], &precomp[i][0], &precomp[i][1], &precomp[i][2], x, y) 1112 } 1113 1114 for i := range xOut { 1115 xOut[i] = 0 1116 } 1117 for i := range yOut { 1118 yOut[i] = 0 1119 } 1120 for i := range zOut { 1121 zOut[i] = 0 1122 } 1123 nIsInfinityMask = ^uint32(0) 1124 var zeroes int16 1125 for i := 0; i < len(scalar); i++ { 1126 if scalar[i] == 0 { 1127 zeroes++ 1128 continue 1129 } 1130 if zeroes > 0 { 1131 for ; zeroes > 0; zeroes-- { 1132 sm2P256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut) 1133 } 1134 } 1135 index = abs(scalar[i]) 1136 sm2P256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut) 1137 sm2P256SelectJacobianPoint(&px, &py, &pz, &precomp, index) 1138 if scalar[i] > 0 { 1139 sm2P256PointAdd(xOut, yOut, zOut, &px, &py, &pz, &tx, &ty, &tz) 1140 } else { 1141 sm2P256PointSub(xOut, yOut, zOut, &px, &py, &pz, &tx, &ty, &tz) 1142 } 1143 sm2P256CopyConditional(xOut, &px, nIsInfinityMask) 1144 sm2P256CopyConditional(yOut, &py, nIsInfinityMask) 1145 sm2P256CopyConditional(zOut, &pz, nIsInfinityMask) 1146 pIsNoninfiniteMask = nonZeroToAllOnes(index) 1147 mask = pIsNoninfiniteMask & ^nIsInfinityMask 1148 sm2P256CopyConditional(xOut, &tx, mask) 1149 sm2P256CopyConditional(yOut, &ty, mask) 1150 sm2P256CopyConditional(zOut, &tz, mask) 1151 nIsInfinityMask &^= pIsNoninfiniteMask 1152 } 1153 if zeroes > 0 { 1154 for ; zeroes > 0; zeroes-- { 1155 sm2P256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut) 1156 } 1157 } 1158 }