github.com/flyinox/gosm@v0.0.0-20171117061539-16768cb62077/src/math/big/prime.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package big

import "math/rand"

// ProbablyPrime reports whether x is probably prime,
// applying the Miller-Rabin test with n pseudorandomly chosen bases
// as well as a Baillie-PSW test.
//
// If x is prime, ProbablyPrime returns true.
// If x is chosen randomly and not prime, ProbablyPrime probably returns false.
// The probability of returning true for a randomly chosen non-prime is at most ¼ⁿ.
//
// ProbablyPrime is 100% accurate for inputs less than 2⁶⁴.
// See Menezes et al., Handbook of Applied Cryptography, 1997, pp. 145-149,
// and FIPS 186-4 Appendix F for further discussion of the error probabilities.
//
// ProbablyPrime is not suitable for judging primes that an adversary may
// have crafted to fool the test.
//
// As of Go 1.8, ProbablyPrime(0) is allowed and applies only a Baillie-PSW test.
// Before Go 1.8, ProbablyPrime applied only the Miller-Rabin tests, and ProbablyPrime(0) panicked.
func (x *Int) ProbablyPrime(n int) bool {
	// Note regarding the doc comment above:
	// It would be more precise to say that the Baillie-PSW test uses the
	// extra strong Lucas test as its Lucas test, but since no one knows
	// how to tell any of the Lucas tests apart inside a Baillie-PSW test
	// (they all work equally well empirically), that detail need not be
	// documented or implicitly guaranteed.
	// The comment does avoid saying "the" Baillie-PSW test
	// because of this general ambiguity.

	if n < 0 {
		panic("negative n for ProbablyPrime")
	}
	if x.neg || len(x.abs) == 0 {
		return false
	}

	// primeBitMask records the primes < 64.
	const primeBitMask uint64 = 1<<2 | 1<<3 | 1<<5 | 1<<7 |
		1<<11 | 1<<13 | 1<<17 | 1<<19 | 1<<23 | 1<<29 | 1<<31 |
		1<<37 | 1<<41 | 1<<43 | 1<<47 | 1<<53 | 1<<59 | 1<<61

	w := x.abs[0]
	if len(x.abs) == 1 && w < 64 {
		return primeBitMask&(1<<w) != 0
	}

	if w&1 == 0 {
		return false // n is even
	}

	const primesA = 3 * 5 * 7 * 11 * 13 * 17 * 19 * 23 * 37
	const primesB = 29 * 31 * 41 * 43 * 47 * 53

	var rA, rB uint32
	switch _W {
	case 32:
		rA = uint32(x.abs.modW(primesA))
		rB = uint32(x.abs.modW(primesB))
	case 64:
		r := x.abs.modW((primesA * primesB) & _M)
		rA = uint32(r % primesA)
		rB = uint32(r % primesB)
	default:
		panic("math/big: invalid word size")
	}

	if rA%3 == 0 || rA%5 == 0 || rA%7 == 0 || rA%11 == 0 || rA%13 == 0 || rA%17 == 0 || rA%19 == 0 || rA%23 == 0 || rA%37 == 0 ||
		rB%29 == 0 || rB%31 == 0 || rB%41 == 0 || rB%43 == 0 || rB%47 == 0 || rB%53 == 0 {
		return false
	}

	return x.abs.probablyPrimeMillerRabin(n+1, true) && x.abs.probablyPrimeLucas()
}
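
// For illustration only, a sketch of a typical call of the method defined
// above (the value here is an example, not from the source):
//
//	m, _ := new(big.Int).SetString("170141183460469231731687303715884105727", 10) // 2^127 - 1, a Mersenne prime
//	m.ProbablyPrime(20) // true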

// probablyPrimeMillerRabin reports whether n passes reps rounds of the
// Miller-Rabin primality test, using pseudo-randomly chosen bases.
// If force2 is true, one of the rounds is forced to use base 2.
// See Handbook of Applied Cryptography, p. 139, Algorithm 4.24.
// The number n is known to be non-zero.
func (n nat) probablyPrimeMillerRabin(reps int, force2 bool) bool {
	nm1 := nat(nil).sub(n, natOne)
	// determine q, k such that nm1 = q << k
	k := nm1.trailingZeroBits()
	q := nat(nil).shr(nm1, k)

	nm3 := nat(nil).sub(nm1, natTwo)
	rand := rand.New(rand.NewSource(int64(n[0])))

	var x, y, quotient nat
	nm3Len := nm3.bitLen()

NextRandom:
	for i := 0; i < reps; i++ {
		if i == reps-1 && force2 {
			x = x.set(natTwo)
		} else {
			x = x.random(rand, nm3, nm3Len)
			x = x.add(x, natTwo)
		}
		y = y.expNN(x, q, n)
		if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
			continue
		}
		for j := uint(1); j < k; j++ {
			y = y.mul(y, y)
			quotient, y = quotient.div(y, y, n)
			if y.cmp(nm1) == 0 {
				continue NextRandom
			}
			if y.cmp(natOne) == 0 {
				return false
			}
		}
		return false
	}

	return true
}

// probablyPrimeLucas reports whether n passes the "almost extra strong" Lucas probable prime test,
// using Baillie-OEIS parameter selection. This corresponds to "AESLPSP" on Jacobsen's tables (link below).
// The combination of this test and a Miller-Rabin/Fermat test with base 2 gives a Baillie-PSW test.
//
// References:
//
// Baillie and Wagstaff, "Lucas Pseudoprimes", Mathematics of Computation 35(152),
// October 1980, pp. 1391-1417, especially page 1401.
// http://www.ams.org/journals/mcom/1980-35-152/S0025-5718-1980-0583518-6/S0025-5718-1980-0583518-6.pdf
//
// Grantham, "Frobenius Pseudoprimes", Mathematics of Computation 70(234),
// March 2000, pp. 873-891.
// http://www.ams.org/journals/mcom/2001-70-234/S0025-5718-00-01197-2/S0025-5718-00-01197-2.pdf
//
// Baillie, "Extra strong Lucas pseudoprimes", OEIS A217719, https://oeis.org/A217719.
//
// Jacobsen, "Pseudoprime Statistics, Tables, and Data", http://ntheory.org/pseudoprimes.html.
//
// Nicely, "The Baillie-PSW Primality Test", http://www.trnicely.net/misc/bpsw.html.
// (Note that Nicely's definition of the "extra strong" test gives the wrong Jacobi condition,
// as pointed out by Jacobsen.)
//
// Crandall and Pomerance, Prime Numbers: A Computational Perspective, 2nd ed.
// Springer, 2005.
func (n nat) probablyPrimeLucas() bool {
	// Discard 0, 1.
	if len(n) == 0 || n.cmp(natOne) == 0 {
		return false
	}
	// Two is the only even prime.
	// Already checked by caller, but here to allow testing in isolation.
	if n[0]&1 == 0 {
		return n.cmp(natTwo) == 0
	}

	// Baillie-OEIS "method C" for choosing D, P, Q,
	// as in https://oeis.org/A217719/a217719.txt:
	// try increasing P ≥ 3 such that D = P² - 4 (so Q = 1)
	// until Jacobi(D, n) = -1.
	// The search is expected to succeed for non-square n after just a few trials.
	// After more than expected failures, check whether n is square
	// (which would cause Jacobi(D, n) = 1 for all D not dividing n).
	p := Word(3)
	d := nat{1}
	t1 := nat(nil) // temp
	intD := &Int{abs: d}
	intN := &Int{abs: n}
	for ; ; p++ {
		if p > 10000 {
			// This is widely believed to be impossible.
			// If we get a report, we'll want the exact number n.
			panic("math/big: internal error: cannot find (D/n) = -1 for " + intN.String())
		}
		d[0] = p*p - 4
		j := Jacobi(intD, intN)
		if j == -1 {
			break
		}
		if j == 0 {
			// d = p²-4 = (p-2)(p+2).
			// If (d/n) == 0 then d shares a prime factor with n.
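			// For illustration (example value, not from the source): calling
			// this function directly on n = 25 lands here on the first
			// iteration, since p = 3 gives d = 5 and Jacobi(5, 25) = 0.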
			// Since the loop proceeds in increasing p and starts with p-2==1,
			// the shared prime factor must be p+2.
			// If p+2 == n, then n is prime; otherwise p+2 is a proper factor of n.
			return len(n) == 1 && n[0] == p+2
		}
		if p == 40 {
			// We'll never find (d/n) = -1 if n is a square.
			// If n is a non-square we expect to find a d in just a few attempts on average.
			// After 40 attempts, take a moment to check if n is indeed a square.
			t1 = t1.sqrt(n)
			t1 = t1.mul(t1, t1)
			if t1.cmp(n) == 0 {
				return false
			}
		}
	}

	// Grantham definition of "extra strong Lucas pseudoprime", after Thm 2.3 on p. 876
	// (D, P, Q above have become Δ, b, 1):
	//
	//	Let U_n = U_n(b, 1), V_n = V_n(b, 1), and Δ = b²-4.
	//	An extra strong Lucas pseudoprime to base b is a composite n = 2^r s + Jacobi(Δ, n),
	//	where s is odd and gcd(n, 2*Δ) = 1, such that either (i) U_s ≡ 0 mod n and V_s ≡ ±2 mod n,
	//	or (ii) V_{2^t s} ≡ 0 mod n for some 0 ≤ t < r-1.
	//
	// We know gcd(n, Δ) = 1 or else we'd have found Jacobi(d, n) == 0 above.
	// We know gcd(n, 2) = 1 because n is odd.
	//
	// Arrange s = (n - Jacobi(Δ, n)) / 2^r = (n+1) / 2^r.
	s := nat(nil).add(n, natOne)
	r := int(s.trailingZeroBits())
	s = s.shr(s, uint(r))
	nm2 := nat(nil).sub(n, natTwo) // n-2

	// We apply the "almost extra strong" test, which checks the above conditions
	// except for U_s ≡ 0 mod n, which allows us to avoid computing any U_k values.
	// Jacobsen points out that maybe we should just do the full extra strong test:
	// "It is also possible to recover U_n using Crandall and Pomerance equation 3.13:
	// U_n = D^-1 (2V_{n+1} - PV_n) allowing us to run the full extra-strong test
	// at the cost of a single modular inversion. This computation is easy and fast in GMP,
	// so we can get the full extra-strong test at essentially the same performance as the
	// almost extra strong test."

	// Compute Lucas sequence V_s(b, 1), where:
	//
	//	V(0) = 2
	//	V(1) = P
	//	V(k) = P V(k-1) - Q V(k-2).
	//
	// (Remember that due to method C above, P = b, Q = 1.)
	//
	// In general V(k) = α^k + β^k, where α and β are roots of x² - Px + Q.
	// Crandall and Pomerance (p.147) observe that for 0 ≤ j ≤ k,
	//
	//	V(j+k) = V(j)V(k) - V(k-j).
	//
	// So in particular, to quickly double the subscript:
	//
	//	V(2k) = V(k)² - 2
	//	V(2k+1) = V(k) V(k+1) - P
	//
	// We can therefore start with k=0 and build up to k=s in log₂(s) steps.
	natP := nat(nil).setWord(p)
	vk := nat(nil).setWord(2)
	vk1 := nat(nil).setWord(p)
	t2 := nat(nil) // temp
	for i := int(s.bitLen()); i >= 0; i-- {
		if s.bit(uint(i)) != 0 {
			// k' = 2k+1
			// V(k') = V(2k+1) = V(k) V(k+1) - P.
			t1 = t1.mul(vk, vk1)
			t1 = t1.add(t1, n)
			t1 = t1.sub(t1, natP)
			t2, vk = t2.div(vk, t1, n)
			// V(k'+1) = V(2k+2) = V(k+1)² - 2.
			t1 = t1.mul(vk1, vk1)
			t1 = t1.add(t1, nm2)
			t2, vk1 = t2.div(vk1, t1, n)
		} else {
			// k' = 2k
			// V(k'+1) = V(2k+1) = V(k) V(k+1) - P.
			t1 = t1.mul(vk, vk1)
			t1 = t1.add(t1, n)
			t1 = t1.sub(t1, natP)
			t2, vk1 = t2.div(vk1, t1, n)
			// V(k') = V(2k) = V(k)² - 2
			t1 = t1.mul(vk, vk)
			t1 = t1.add(t1, nm2)
			t2, vk = t2.div(vk, t1, n)
		}
	}

	// Now k=s, so vk = V(s). Check V(s) ≡ ±2 (mod n).
	if vk.cmp(natTwo) == 0 || vk.cmp(nm2) == 0 {
		// Check U(s) ≡ 0.
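		// Because Jacobi(d, n) = -1 above, gcd(d, n) = 1, so the D⁻¹ factor
		// in the identity below is a unit mod n and can be dropped when
		// testing U(s) for zero.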
		// As suggested by Jacobsen, apply Crandall and Pomerance equation 3.13:
		//
		//	U(k) = D⁻¹ (2 V(k+1) - P V(k))
		//
		// Since we are checking for U(k) == 0 it suffices to check 2 V(k+1) == P V(k) mod n,
		// or P V(k) - 2 V(k+1) == 0 mod n.
		t1 := t1.mul(vk, natP)
		t2 := t2.shl(vk1, 1)
		if t1.cmp(t2) < 0 {
			t1, t2 = t2, t1
		}
		t1 = t1.sub(t1, t2)
		t3 := vk1 // steal vk1, no longer needed below
		vk1 = nil
		_ = vk1
		t2, t3 = t2.div(t3, t1, n)
		if len(t3) == 0 {
			return true
		}
	}

	// Check V(2^t s) ≡ 0 mod n for some 0 ≤ t < r-1.
	for t := 0; t < r-1; t++ {
		if len(vk) == 0 { // vk == 0
			return true
		}
		// Optimization: V(k) = 2 is a fixed point for V(k') = V(k)² - 2,
		// so if V(k) = 2, we can stop: we will never find a future V(k) == 0.
		if len(vk) == 1 && vk[0] == 2 { // vk == 2
			return false
		}
		// k' = 2k
		// V(k') = V(2k) = V(k)² - 2
		t1 = t1.mul(vk, vk)
		t1 = t1.sub(t1, natTwo)
		t2, vk = t2.div(vk, t1, n)
	}
	return false
}
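
// A minimal illustrative sketch (the concrete values are examples, not from the
// source) of how the two halves of the Baillie-PSW combination used by
// ProbablyPrime can be exercised in isolation from within this package, e.g. in a test:
//
//	p := nat(nil).setWord(101)          // a small prime
//	p.probablyPrimeMillerRabin(1, true) // true: primes always pass
//	p.probablyPrimeLucas()              // true: primes always pass
//
//	c := nat(nil).setWord(2047)         // 2047 = 23*89, the smallest strong pseudoprime to base 2
//	c.probablyPrimeMillerRabin(1, true) // true, even though 2047 is composite
//	c.probablyPrimeLucas()              // false, so the combined test correctly rejects 2047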