github.com/ethereum/go-ethereum@v1.16.1/crypto/secp256k1/libsecp256k1/src/modinv64_impl.h

/***********************************************************************
 * Copyright (c) 2020 Peter Dettman                                    *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H

#include "int128.h"
#include "modinv64.h"

/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
 * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
 *
 * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
 * implementation for N=62, using 62-bit signed limbs represented as int64_t.
 */
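
/* For illustration (this example is not part of the upstream sources): a value x is held in a
 * secp256k1_modinv64_signed62 as five signed limbs v[0..4] in base 2^62, i.e.
 *
 *     x = v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248,
 *
 * so for example x = 2^62 + 5 would be {{5, 1, 0, 0, 0}}. Intermediate values below may use
 * negative limbs; only fully normalized values have every limb in [0,2^62). */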

/* Data type for transition matrices (see section 3 of explanation).
 *
 * t = [ u  v ]
 *     [ q  r ]
 */
typedef struct {
    int64_t u, v, q, r;
} secp256k1_modinv64_trans2x2;
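
/* As a worked restatement (illustrative, following the explanation document): a transition
 * matrix t, scaled by 2^62, acts on column vectors, so applying it to a pair (f,g) yields
 *
 *     f' = (u*f + v*g) / 2^62
 *     g' = (q*f + r*g) / 2^62
 *
 * with both divisions exact; secp256k1_modinv64_update_fg_62 and
 * secp256k1_modinv64_update_de_62 below implement exactly this. */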

#ifdef VERIFY
/* Helper function to compute the absolute value of an int64_t.
 * (we don't use abs/labs/llabs as it depends on the int sizes). */
static int64_t secp256k1_modinv64_abs(int64_t v) {
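    /* Note that negating INT64_MIN itself would overflow (undefined behavior for signed
     * integers), so the check below excludes it. */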
    VERIFY_CHECK(v > INT64_MIN);
    if (v < 0) return -v;
    return v;
}

static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}};

/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */
static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) {
    const uint64_t M62 = UINT64_MAX >> 2;
    secp256k1_int128 c, d;
    int i;
    secp256k1_i128_from_i64(&c, 0);
    for (i = 0; i < 4; ++i) {
        if (i < alen) secp256k1_i128_accum_mul(&c, a->v[i], factor);
        r->v[i] = secp256k1_i128_to_u64(&c) & M62; secp256k1_i128_rshift(&c, 62);
    }
    if (4 < alen) secp256k1_i128_accum_mul(&c, a->v[4], factor);
    secp256k1_i128_from_i64(&d, secp256k1_i128_to_i64(&c));
    VERIFY_CHECK(secp256k1_i128_eq_var(&c, &d));
    r->v[4] = secp256k1_i128_to_i64(&c);
}

/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */
static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, int alen, const secp256k1_modinv64_signed62 *b, int64_t factor) {
    int i;
    secp256k1_modinv64_signed62 am, bm;
    secp256k1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */
    secp256k1_modinv64_mul_62(&bm, b, 5, factor);
    for (i = 0; i < 4; ++i) {
        /* Verify that all but the top limb of a and b are normalized. */
        VERIFY_CHECK(am.v[i] >> 62 == 0);
        VERIFY_CHECK(bm.v[i] >> 62 == 0);
    }
    for (i = 4; i >= 0; --i) {
        if (am.v[i] < bm.v[i]) return -1;
        if (am.v[i] > bm.v[i]) return 1;
    }
    return 0;
}

/* Check if the determinant of t is equal to 1 << n. If abs, check if |det t| == 1 << n. */
static int secp256k1_modinv64_det_check_pow2(const secp256k1_modinv64_trans2x2 *t, unsigned int n, int abs) {
    secp256k1_int128 a;
    secp256k1_i128_det(&a, t->u, t->v, t->q, t->r);
    if (secp256k1_i128_check_pow2(&a, n, 1)) return 1;
    if (abs && secp256k1_i128_check_pow2(&a, n, -1)) return 1;
    return 0;
}
#endif

/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus
 * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
 * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range
 * [0,2^62). */
static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int64_t sign, const secp256k1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
    volatile int64_t cond_add, cond_negate;

#ifdef VERIFY
    /* Verify that all limbs are in range (-2^62,2^62). */
    int i;
    for (i = 0; i < 5; ++i) {
        VERIFY_CHECK(r->v[i] >= -M62);
        VERIFY_CHECK(r->v[i] <= M62);
    }
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif

    /* In a first step, add the modulus if the input is negative, and then negate if requested.
     * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
     * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
     * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
     * indeed the behavior of the right shift operator). */
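    /* Illustrative aside (not an upstream comment): cond_add = r4 >> 63 is all-ones (-1) when
     * r is negative and 0 otherwise, so "x & cond_add" selects x or 0 without a branch.
     * Likewise (x ^ mask) - mask negates x when mask == -1: e.g. for x = 5,
     * (5 ^ -1) - (-1) = -6 + 1 = -5, while for mask == 0 it leaves x unchanged. */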
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    cond_negate = sign >> 63;
    r0 = (r0 ^ cond_negate) - cond_negate;
    r1 = (r1 ^ cond_negate) - cond_negate;
    r2 = (r2 ^ cond_negate) - cond_negate;
    r3 = (r3 ^ cond_negate) - cond_negate;
    r4 = (r4 ^ cond_negate) - cond_negate;
    /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    /* In a second step add the modulus again if the result is still negative, bringing
     * r to range [0,modulus). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    /* And propagate again. */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    r->v[0] = r0;
    r->v[1] = r1;
    r->v[2] = r2;
    r->v[3] = r3;
    r->v[4] = r4;

    VERIFY_CHECK(r0 >> 62 == 0);
    VERIFY_CHECK(r1 >> 62 == 0);
    VERIFY_CHECK(r2 >> 62 == 0);
    VERIFY_CHECK(r3 >> 62 == 0);
    VERIFY_CHECK(r4 >> 62 == 0);
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
}

/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
 * Note that the transformation matrix is scaled by 2^62 and not 2^59.
 *
 * Input:  zeta: initial zeta
 *         f0:   bottom limb of initial f
 *         g0:   bottom limb of initial g
 * Output: t: transition matrix
 * Return: final zeta
 *
 * Implements the divsteps_n_matrix function from the explanation.
 */
static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* u,v,q,r are the elements of the transformation matrix being built up,
     * starting with the identity matrix times 8 (because the caller expects
     * a result scaled by 2^62). Semantically they are signed integers
     * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
     * permits left shifting (which is UB for negative numbers). The range
     * being inside [-2^63,2^63) means that casting to signed works correctly.
     */
    uint64_t u = 8, v = 0, q = 0, r = 8;
    volatile uint64_t c1, c2;
    uint64_t mask1, mask2, f = f0, g = g0, x, y, z;
    int i;

    for (i = 3; i < 62; ++i) {
        VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
        VERIFY_CHECK((u * f0 + v * g0) == f << i);
        VERIFY_CHECK((q * f0 + r * g0) == g << i);
        /* Compute conditional masks for (zeta < 0) and for (g & 1). */
        c1 = zeta >> 63;
        mask1 = c1;
        c2 = g & 1;
        mask2 = -c2;
        /* Compute x,y,z, conditionally negated versions of f,u,v. */
        x = (f ^ mask1) - mask1;
        y = (u ^ mask1) - mask1;
        z = (v ^ mask1) - mask1;
        /* Conditionally add x,y,z to g,q,r. */
        g += x & mask2;
        q += y & mask2;
        r += z & mask2;
        /* In what follows, mask1 is a condition mask for (zeta < 0) and (g & 1). */
        mask1 &= mask2;
        /* Conditionally change zeta into -zeta-2 or zeta-1. */
        zeta = (zeta ^ mask1) - 1;
        /* Conditionally add g,q,r to f,u,v. */
        f += g & mask1;
        u += q & mask1;
        v += r & mask1;
        /* Shifts */
        g >>= 1;
        u <<= 1;
        v <<= 1;
        /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */
        VERIFY_CHECK(zeta >= -591 && zeta <= 591);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;

    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial
     * 8*identity (which has determinant 2^6) means the overall output has determinant
     * 2^65. */
    VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0));

    return zeta;
}
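
/* A worked example (illustrative only, not part of the upstream sources): one divstep maps
 * (zeta, f, g), with f always odd, as follows:
 *
 *     if zeta < 0 and g is odd:  (zeta, f, g) -> (-zeta - 2, g, (g - f) / 2)
 *     otherwise, if g is odd:    (zeta, f, g) -> ( zeta - 1, f, (g + f) / 2)
 *     otherwise:                 (zeta, f, g) -> ( zeta - 1, f,  g      / 2)
 *
 * E.g. starting from (zeta, f, g) = (-1, 1, 4): g is even, giving (-2, 1, 2), then (-3, 1, 1),
 * and with g now odd and zeta < 0, (1, 1, 0). The loop above performs 59 such steps
 * branchlessly, accumulating their 2x2 matrices into t. */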

/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta).
 *
 * Input:  eta: initial eta
 *         f0:  bottom limb of initial f
 *         g0:  bottom limb of initial g
 * Output: t: transition matrix
 * Return: final eta
 *
 * Implements the divsteps_n_matrix_var function from the explanation.
 */
static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_59. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t f = f0, g = g0, m;
    uint32_t w;
    int i = 62, limit, zeros;

    for (;;) {
        /* Use a sentinel bit to count zeros only up to i. */
        zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i));
        /* Perform zeros divsteps at once; they all just divide g by two. */
        g >>= zeros;
        u <<= zeros;
        v <<= zeros;
        eta -= zeros;
        i -= zeros;
        /* We're done once we've done 62 divsteps. */
        if (i == 0) break;
        VERIFY_CHECK((f & 1) == 1);
        VERIFY_CHECK((g & 1) == 1);
        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
        VERIFY_CHECK(eta >= -745 && eta <= 745);
        /* If eta is negative, negate it and replace f,g with g,-f. */
        if (eta < 0) {
            uint64_t tmp;
            eta = -eta;
            tmp = f; f = g; g = -tmp;
            tmp = u; u = q; q = -tmp;
            tmp = v; v = r; r = -tmp;
            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
             * out (as we'd be done before that point), and no more than eta+1 can be done as its
             * sign will flip again once that happens. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 6) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 63U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
             * bits. */
            w = (f * g * (f * f - 2)) & m;
        } else {
            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
             * eta tends to be smaller here. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 4) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 15U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
             * bits. */
            w = f + (((f + 1) & 4) << 1);
            w = (-w * g) & m;
        }
        g += f * w;
        q += u * w;
        r += v * w;
        VERIFY_CHECK((g & m) == 0);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;

    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 62 of them will have determinant 2^62. */
    VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0));

    return eta;
}
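
/* A short derivation of the cancellation formulas above (illustrative, not upstream text):
 * for odd f, f*f == 1 (mod 8), and one can check that f * (f * (f*f - 2)) == -1 (mod 64),
 * e.g. f = 3: 3 * (3 * 7) = 63 == -1 (mod 64). Hence w = (f * g * (f*f - 2)) & m satisfies
 * w == -g/f (mod 2^min(limit,6)), so g + f*w clears the bottom min(limit,6) bits of g.
 * Similarly, w = f + (((f + 1) & 4) << 1) is an inverse of f modulo 16 (e.g. f = 3 gives
 * w = 11 and 3*11 == 1 (mod 16)), yielding the 4-bit variant in the else branch. */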

/* Compute the transition matrix and eta for 62 posdivsteps (variable time, eta=-delta), keeping
 * track of the Jacobi symbol along the way. f0 and g0 must be f and g mod 2^64 rather than 2^62,
 * because Jacobi tracking requires knowing (f mod 8) rather than just (f mod 2).
 *
 * Input:        eta: initial eta
 *               f0:  bottom limb of initial f
 *               g0:  bottom limb of initial g
 * Output:       t: transition matrix
 * Input/Output: (*jacp & 1) is bitflipped if and only if the Jacobi symbol of (f | g) changes sign
 *               by applying the returned transformation matrix to it. The other bits of *jacp may
 *               change, but are meaningless.
 * Return:       final eta
 */
static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t, int *jacp) {
    /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_59. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t f = f0, g = g0, m;
    uint32_t w;
    int i = 62, limit, zeros;
    int jac = *jacp;

    for (;;) {
        /* Use a sentinel bit to count zeros only up to i. */
        zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i));
        /* Perform zeros divsteps at once; they all just divide g by two. */
        g >>= zeros;
        u <<= zeros;
        v <<= zeros;
        eta -= zeros;
        i -= zeros;
        /* Update the bottom bit of jac: when dividing g by an odd power of 2,
         * if (f mod 8) is 3 or 5, the Jacobi symbol changes sign. */
        jac ^= (zeros & ((f >> 1) ^ (f >> 2)));
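        /* Illustrative check of the bit trick above (not an upstream comment): bit 0 of
         * (f >> 1) ^ (f >> 2) is 1 exactly when f mod 8 is 3 or 5 (for odd f: 1 -> 0,
         * 3 -> 1, 5 -> 1, 7 -> 0), and masking with zeros applies the flip only when the
         * number of halvings is odd (bit 0 of zeros). */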
        /* We're done once we've done 62 posdivsteps. */
        if (i == 0) break;
        VERIFY_CHECK((f & 1) == 1);
        VERIFY_CHECK((g & 1) == 1);
        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
        /* If eta is negative, negate it and replace f,g with g,f. */
        if (eta < 0) {
            uint64_t tmp;
            eta = -eta;
            tmp = f; f = g; g = tmp;
            tmp = u; u = q; q = tmp;
            tmp = v; v = r; r = tmp;
            /* Update bottom bit of jac: when swapping f and g, the Jacobi symbol changes sign
             * if both f and g are 3 mod 4. */
            jac ^= ((f & g) >> 1);
            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
             * out (as we'd be done before that point), and no more than eta+1 can be done as its
             * sign will flip again once that happens. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 6) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 63U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
             * bits. */
            w = (f * g * (f * f - 2)) & m;
        } else {
            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
             * eta tends to be smaller here. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 4) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 15U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
             * bits. */
            w = f + (((f + 1) & 4) << 1);
            w = (-w * g) & m;
        }
        g += f * w;
        q += u * w;
        r += v * w;
        VERIFY_CHECK((g & m) == 0);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;

    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
     * the aggregate of 62 of them will have determinant 2^62 or -2^62. */
    VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1));

    *jacp = jac;
    return eta;
}
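
/* The two sign-flip rules above are the standard Jacobi symbol identities (illustrative
 * summary, not upstream text): (2/f) = -1 iff f == 3 or 5 (mod 8), applied once per halving
 * of g, and quadratic reciprocity, where (f/g)(g/f) = -1 iff f == g == 3 (mod 4), applied
 * whenever f and g are swapped. */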

/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62.
 *
 * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
 * (-2^62,2^62).
 *
 * This implements the update_de function from the explanation.
 */
static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo* modinfo) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t md, me, sd, se;
    secp256k1_int128 cd, ce;
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
    VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */
    VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */

    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    sd = d4 >> 63;
    se = e4 >> 63;
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    secp256k1_i128_mul(&cd, u, d0);
    secp256k1_i128_accum_mul(&cd, v, e0);
    secp256k1_i128_mul(&ce, q, d0);
    secp256k1_i128_accum_mul(&ce, r, e0);
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
    md -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&cd) + md) & M62;
    me -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&ce) + me) & M62;
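    /* Why this works (illustrative aside, not an upstream comment): modulus_inv62 is the
     * inverse of the modulus modulo 2^62, so modulo 2^62 the new md equals
     * -cd * modulus^-1, and therefore cd + modulus*md == 0 (mod 2^62) once the accumulation
     * below adds modulus*md into cd; likewise for ce and me. */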
    /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[0], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[0], me);
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cd, u, d1);
    secp256k1_i128_accum_mul(&cd, v, e1);
    secp256k1_i128_accum_mul(&ce, q, d1);
    secp256k1_i128_accum_mul(&ce, r, e1);
    if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[1], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[1], me);
    }
    d->v[0] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[0] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cd, u, d2);
    secp256k1_i128_accum_mul(&cd, v, e2);
    secp256k1_i128_accum_mul(&ce, q, d2);
    secp256k1_i128_accum_mul(&ce, r, e2);
    if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[2], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[2], me);
    }
    d->v[1] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[1] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cd, u, d3);
    secp256k1_i128_accum_mul(&cd, v, e3);
    secp256k1_i128_accum_mul(&ce, q, d3);
    secp256k1_i128_accum_mul(&ce, r, e3);
    if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[3], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[3], me);
    }
    d->v[2] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[2] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cd, u, d4);
    secp256k1_i128_accum_mul(&cd, v, e4);
    secp256k1_i128_accum_mul(&ce, q, d4);
    secp256k1_i128_accum_mul(&ce, r, e4);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[4], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[4], me);
    d->v[3] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[3] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = secp256k1_i128_to_i64(&cd);
    e->v[4] = secp256k1_i128_to_i64(&ce);

    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    secp256k1_int128 cf, cg;
    /* Start computing t*[f,g]. */
    secp256k1_i128_mul(&cf, u, f0);
    secp256k1_i128_accum_mul(&cf, v, g0);
    secp256k1_i128_mul(&cg, q, f0);
    secp256k1_i128_accum_mul(&cg, r, g0);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cf, u, f1);
    secp256k1_i128_accum_mul(&cf, v, g1);
    secp256k1_i128_accum_mul(&cg, q, f1);
    secp256k1_i128_accum_mul(&cg, r, g1);
    f->v[0] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[0] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cf, u, f2);
    secp256k1_i128_accum_mul(&cf, v, g2);
    secp256k1_i128_accum_mul(&cg, q, f2);
    secp256k1_i128_accum_mul(&cg, r, g2);
    f->v[1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cf, u, f3);
    secp256k1_i128_accum_mul(&cf, v, g3);
    secp256k1_i128_accum_mul(&cg, q, f3);
    secp256k1_i128_accum_mul(&cg, r, g3);
    f->v[2] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[2] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cf, u, f4);
    secp256k1_i128_accum_mul(&cf, v, g4);
    secp256k1_i128_accum_mul(&cg, q, f4);
    secp256k1_i128_accum_mul(&cg, r, g4);
    f->v[3] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[3] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = secp256k1_i128_to_i64(&cf);
    g->v[4] = secp256k1_i128_to_i64(&cg);
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * Version that operates on a variable number of limbs in f and g.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t fi, gi;
    secp256k1_int128 cf, cg;
    int i;
    VERIFY_CHECK(len > 0);
    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    secp256k1_i128_mul(&cf, u, fi);
    secp256k1_i128_accum_mul(&cf, v, gi);
    secp256k1_i128_mul(&cg, q, fi);
    secp256k1_i128_accum_mul(&cg, r, gi);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
     * down by 62 bits). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        secp256k1_i128_accum_mul(&cf, u, fi);
        secp256k1_i128_accum_mul(&cf, v, gi);
        secp256k1_i128_accum_mul(&cg, q, fi);
        secp256k1_i128_accum_mul(&cg, r, gi);
        f->v[i - 1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
        g->v[i - 1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = secp256k1_i128_to_i64(&cf);
    g->v[len - 1] = secp256k1_i128_to_i64(&cg);
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
    int i;
    int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */
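
    /* Throughout the loop the invariants d*x == f (mod modulus) and e*x == g (mod modulus)
     * hold (an illustrative restatement of the explanation document): initially d=0 matches
     * f=modulus==0 and e=1 matches g=x, each update multiplies both pairs by the same
     * t/2^62, and once g reaches 0 with f = +/-1 we get d == +/-1/x (mod modulus). */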

    /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */
    for (i = 0; i < 10; ++i) {
        /* Compute transition matrix and new zeta after 59 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        zeta = secp256k1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */

        secp256k1_modinv64_update_fg_62(&f, &g, &t);

        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */
    }

    /* At this point sufficient iterations have been performed that g must have reached 0
     * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
     * values i.e. +/- 1, and d now contains +/- the modular inverse. */

    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and f == modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0));

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
    *x = d;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
#ifdef VERIFY
    int i = 0;
#endif
    int j, len = 5;
    int64_t eta = -1; /* eta = -delta; delta is initially 1 */
    int64_t cond, fn, gn;

    /* Do iterations of 62 divsteps each until g=0. */
    while (1) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */

        secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g=0. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we're done. */
            if (cond == 0) break;
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn ^ (fn >> 63);
        cond |= gn ^ (gn >> 63);
        /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
        if (cond == 0) {
            f.v[len - 2] |= (uint64_t)fn << 62;
            g.v[len - 2] |= (uint64_t)gn << 62;
            --len;
        }

        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */
    }

    /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
     * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */

    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and f == modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0));

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
    *x = d;
}

/* Do up to 25 iterations of 62 posdivsteps (up to 1550 steps; more is extremely rare) each until f=1.
 * In VERIFY mode use a lower number of iterations (744, close to the median 756), so failure actually occurs. */
#ifdef VERIFY
#define JACOBI64_ITERATIONS 12
#else
#define JACOBI64_ITERATIONS 25
#endif

/* Compute the Jacobi symbol of x modulo modinfo->modulus (variable time). gcd(x,modulus) must be 1. */
static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with f=modulus, g=x, eta=-1. */
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
    int j, len = 5;
    int64_t eta = -1; /* eta = -delta; delta is initially 1 */
    int64_t cond, fn, gn;
    int jac = 0;
    int count;

    /* The input limbs must all be non-negative. */
    VERIFY_CHECK(g.v[0] >= 0 && g.v[1] >= 0 && g.v[2] >= 0 && g.v[3] >= 0 && g.v[4] >= 0);

    /* If x > 0, then if the loop below converges, it converges to f=g=gcd(x,modulus). Since we
     * require that gcd(x,modulus)=1 and modulus>=3, x cannot be 0. Thus, we must reach f=1 (or
     * time out). */
    VERIFY_CHECK((g.v[0] | g.v[1] | g.v[2] | g.v[3] | g.v[4]) != 0);

    for (count = 0; count < JACOBI64_ITERATIONS; ++count) {
        /* Compute transition matrix and new eta after 62 posdivsteps. */
        secp256k1_modinv64_trans2x2 t;
        eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac);
        /* Update f,g using that transition matrix. */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */

        secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of f is 1, there is a chance that f=1. */
        if (f.v[0] == 1) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= f.v[j];
            }
            /* If so, we're done. When f=1, the Jacobi symbol (g | f)=1. */
            if (cond == 0) return 1 - 2*(jac & 1);
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn;
        cond |= gn;
        /* If so, reduce length. */
        if (cond == 0) --len;

        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */
    }

    /* The loop failed to converge to f=g=1 after 1550 posdivsteps. Return 0, indicating unknown result. */
    return 0;
}

#endif /* SECP256K1_MODINV64_IMPL_H */