gitee.com/liu-zhao234568/cntest@v1.0.0/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h

/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
#define _SECP256K1_SCALAR_REPR_IMPL_H_

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
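/* Read least-significant limb first, these are the 32-bit words of the group
 * order
 *   n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,
 * so SECP256K1_N_0 holds the lowest 32 bits and SECP256K1_N_7 the highest. */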

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (~SECP256K1_N_2)
#define SECP256K1_N_C_3 (~SECP256K1_N_3)
#define SECP256K1_N_C_4 (1)
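/* Since n > 2^255, the complement c = 2^256 - n fits in 129 bits:
 *   c = 0x14551231950B75FC4402DA1732FC9BEBF,
 * which is why only limbs 0..4 are defined; limbs 5..7 of c are zero and the
 * reduction code below simply omits them. */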

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
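/* n is odd, so this is (n - 1) / 2 = floor(n / 2). secp256k1_scalar_is_high
 * below compares against it; elsewhere in the library that test drives low-S
 * signature normalization. */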

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
    r->d[0] = 0;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
    r->d[4] = 0;
    r->d[5] = 0;
    r->d[6] = 0;
    r->d[7] = 0;
}

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
    r->d[4] = 0;
    r->d[5] = 0;
    r->d[6] = 0;
    r->d[7] = 0;
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
    return (a->d[offset >> 5] >> (offset & 0x1F)) & ((((uint32_t)1) << count) - 1);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 5 == offset >> 5) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 5) + 1 < 8);
        return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
    }
}
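/* Example of the cross-limb branch: offset = 30, count = 4 reads bits 30..33,
 * taking bits 30..31 from d[0] and bits 32..33 from d[1]:
 *   ((d[0] >> 30) | (d[1] << 2)) & 0xF. */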

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */
    no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */
    no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */
    no |= (a->d[4] < SECP256K1_N_4);
    yes |= (a->d[4] > SECP256K1_N_4) & ~no;
    no |= (a->d[3] < SECP256K1_N_3) & ~yes;
    yes |= (a->d[3] > SECP256K1_N_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_2) & ~yes;
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}
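/* The above is a branch-free lexicographic comparison against n, scanning
 * from the most significant limb down: `yes` and `no` latch the first limb
 * that differs strictly. Limbs 5..7 of n are 0xFFFFFFFF, so a strict > is
 * impossible there and only the < checks are needed. */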

SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
    uint64_t t;
    VERIFY_CHECK(overflow <= 1);
    t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;
    r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;
    r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[5];
    r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[6];
    r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[7];
    r->d[7] = t & 0xFFFFFFFFUL;
    return overflow;
}
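/* Because 2^256 - n == -n (mod 2^256), adding overflow * SECP256K1_N_C and
 * discarding the carry out of bit 256 subtracts n exactly when overflow is 1,
 * bringing the value back into [0, n). */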

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    uint64_t t = (uint64_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[1] + b->d[1];
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[2] + b->d[2];
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[3] + b->d[3];
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[4] + b->d[4];
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[5] + b->d[5];
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[6] + b->d[6];
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[7] + b->d[7];
    r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
    overflow = t + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}
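/* The final carry t and the overflow check are mutually exclusive: a, b < n
 * gives a + b < 2n < 2^257, and if the addition carried out of 256 bits the
 * remaining low part a + b - 2^256 is already below n. Hence overflow is
 * always 0 or 1, as the VERIFY_CHECK asserts. */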

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    uint64_t t;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 5) > 7 makes this a no-op */
    t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
    r->d[7] = t & 0xFFFFFFFFULL;
#ifdef VERIFY
    VERIFY_CHECK((t >> 32) == 0);
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
    r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
    r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
    r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
    r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
    r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
    r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
    r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
    bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
    bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
    bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
    bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
    bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
    bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
    bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
    uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
    r->d[1] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;
    r->d[2] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;
    r->d[3] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;
    r->d[4] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;
    r->d[5] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;
    r->d[6] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
    r->d[7] = t & nonzero;
}
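/* For a != 0 this computes n - a via the identity
 *   ~a + n + 1 = (2^256 - 1 - a) + n + 1 = 2^256 + (n - a),
 * where the 2^256 is the carry discarded after the last limb; the `nonzero`
 * mask forces the result to 0 when a = 0, since -0 == 0 (mod n). */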

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[7] < SECP256K1_N_H_7);
    yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
    no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */
    no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */
    no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */
    no |= (a->d[3] < SECP256K1_N_H_3) & ~yes;
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes;
    yes |= (a->d[2] > SECP256K1_N_H_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint32_t mask = !flag - 1;
    uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
    uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
    r->d[0] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
    r->d[1] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
    r->d[2] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
    r->d[3] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask);
    r->d[4] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask);
    r->d[5] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask);
    r->d[6] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
    r->d[7] = t & nonzero;
    return 2 * (mask == 0) - 1;
}
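/* With flag = 1, mask = 0xFFFFFFFF turns each r->d[i] ^ mask into ~r->d[i],
 * reproducing the ~a + n + 1 computation of secp256k1_scalar_negate; with
 * flag = 0 the limbs pass through unchanged. The return value is -1 if the
 * negation happened and 1 otherwise. */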


/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint32_t tl, th; \
    { \
        uint64_t t = (uint64_t)a * b; \
        th = t >> 32;         /* at most 0xFFFFFFFE */ \
        tl = t; \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint32_t tl, th; \
    { \
        uint64_t t = (uint64_t)a * b; \
        th = t >> 32;         /* at most 0xFFFFFFFE */ \
        tl = t; \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
    uint32_t tl, th, th2, tl2; \
    { \
        uint64_t t = (uint64_t)a * b; \
        th = t >> 32;               /* at most 0xFFFFFFFE */ \
        tl = t; \
    } \
    th2 = th + th;                  /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
    tl2 = tl + tl;                  /* at most 0xFFFFFFFE (in case tl was 0x7FFFFFFF) */ \
    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFF */ \
    c0 += tl2;                      /* overflow is handled on the next line */ \
    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
    c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
    c1 += th2;                      /* overflow is handled on the next line */ \
    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                  /* overflow is handled on the next line */ \
    over = (c0 < (a)) ? 1 : 0; \
    c1 += over;                 /* overflow is handled on the next line */ \
    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                 /* overflow is handled on the next line */ \
    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 32 bits of (c0,c1,c2) into n, and shift the number right by 32 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 32 bits of (c0,c1,c2) into n, and shift the number right by 32 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
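/* Taken together, (c0,c1,c2) form a 96-bit accumulator that slides over the
 * 32-bit columns of a product: muladd/sumadd pour partial products into the
 * current column, and extract emits the finished low word and advances to the
 * next column. The _fast variants omit carries the caller can prove never
 * occur. */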

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) {
    uint64_t c;
    uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
    uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
    uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8;

    /* 96 bit accumulator. */
    uint32_t c0, c1, c2;

    /* Reduce 512 bits into 385. */
    /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
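    /* This folding works because 2^256 = n + SECP256K1_N_C, i.e.
     * 2^256 == SECP256K1_N_C (mod n), so for a 512-bit l we have
     * l == (l mod 2^256) + (l >> 256) * SECP256K1_N_C (mod n). Each pass
     * below shrinks the operand: 512 -> 385 -> 258 -> 256 bits. */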
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    muladd(n0, SECP256K1_N_C_2);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    muladd(n1, SECP256K1_N_C_2);
    muladd(n0, SECP256K1_N_C_3);
    extract(m3);
    sumadd(l[4]);
    muladd(n4, SECP256K1_N_C_0);
    muladd(n3, SECP256K1_N_C_1);
    muladd(n2, SECP256K1_N_C_2);
    muladd(n1, SECP256K1_N_C_3);
    sumadd(n0);
    extract(m4);
    sumadd(l[5]);
    muladd(n5, SECP256K1_N_C_0);
    muladd(n4, SECP256K1_N_C_1);
    muladd(n3, SECP256K1_N_C_2);
    muladd(n2, SECP256K1_N_C_3);
    sumadd(n1);
    extract(m5);
    sumadd(l[6]);
    muladd(n6, SECP256K1_N_C_0);
    muladd(n5, SECP256K1_N_C_1);
    muladd(n4, SECP256K1_N_C_2);
    muladd(n3, SECP256K1_N_C_3);
    sumadd(n2);
    extract(m6);
    sumadd(l[7]);
    muladd(n7, SECP256K1_N_C_0);
    muladd(n6, SECP256K1_N_C_1);
    muladd(n5, SECP256K1_N_C_2);
    muladd(n4, SECP256K1_N_C_3);
    sumadd(n3);
    extract(m7);
    muladd(n7, SECP256K1_N_C_1);
    muladd(n6, SECP256K1_N_C_2);
    muladd(n5, SECP256K1_N_C_3);
    sumadd(n4);
    extract(m8);
    muladd(n7, SECP256K1_N_C_2);
    muladd(n6, SECP256K1_N_C_3);
    sumadd(n5);
    extract(m9);
    muladd(n7, SECP256K1_N_C_3);
    sumadd(n6);
    extract(m10);
    sumadd_fast(n7);
    extract_fast(m11);
    VERIFY_CHECK(c0 <= 1);
    m12 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m8, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m9, SECP256K1_N_C_0);
    muladd(m8, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m10, SECP256K1_N_C_0);
    muladd(m9, SECP256K1_N_C_1);
    muladd(m8, SECP256K1_N_C_2);
    extract(p2);
    sumadd(m3);
    muladd(m11, SECP256K1_N_C_0);
    muladd(m10, SECP256K1_N_C_1);
    muladd(m9, SECP256K1_N_C_2);
    muladd(m8, SECP256K1_N_C_3);
    extract(p3);
    sumadd(m4);
    muladd(m12, SECP256K1_N_C_0);
    muladd(m11, SECP256K1_N_C_1);
    muladd(m10, SECP256K1_N_C_2);
    muladd(m9, SECP256K1_N_C_3);
    sumadd(m8);
    extract(p4);
    sumadd(m5);
    muladd(m12, SECP256K1_N_C_1);
    muladd(m11, SECP256K1_N_C_2);
    muladd(m10, SECP256K1_N_C_3);
    sumadd(m9);
    extract(p5);
    sumadd(m6);
    muladd(m12, SECP256K1_N_C_2);
    muladd(m11, SECP256K1_N_C_3);
    sumadd(m10);
    extract(p6);
    sumadd_fast(m7);
    muladd_fast(m12, SECP256K1_N_C_3);
    sumadd_fast(m11);
    extract_fast(p7);
    p8 = c0 + m12;
    VERIFY_CHECK(p8 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
    c = p0 + (uint64_t)SECP256K1_N_C_0 * p8;
    r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p1 + (uint64_t)SECP256K1_N_C_1 * p8;
    r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p2 + (uint64_t)SECP256K1_N_C_2 * p8;
    r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p3 + (uint64_t)SECP256K1_N_C_3 * p8;
    r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p4 + (uint64_t)p8;
    r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p5;
    r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p6;
    r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p7;
    r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}

static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    /* 96 bit accumulator. */
    uint32_t c0 = 0, c1 = 0, c2 = 0;

    /* l[0..15] = a[0..7] * b[0..7]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l[3]);
    muladd(a->d[0], b->d[4]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    muladd(a->d[4], b->d[0]);
    extract(l[4]);
    muladd(a->d[0], b->d[5]);
    muladd(a->d[1], b->d[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    muladd(a->d[4], b->d[1]);
    muladd(a->d[5], b->d[0]);
    extract(l[5]);
    muladd(a->d[0], b->d[6]);
    muladd(a->d[1], b->d[5]);
    muladd(a->d[2], b->d[4]);
    muladd(a->d[3], b->d[3]);
    muladd(a->d[4], b->d[2]);
    muladd(a->d[5], b->d[1]);
    muladd(a->d[6], b->d[0]);
    extract(l[6]);
    muladd(a->d[0], b->d[7]);
    muladd(a->d[1], b->d[6]);
    muladd(a->d[2], b->d[5]);
    muladd(a->d[3], b->d[4]);
    muladd(a->d[4], b->d[3]);
    muladd(a->d[5], b->d[2]);
    muladd(a->d[6], b->d[1]);
    muladd(a->d[7], b->d[0]);
    extract(l[7]);
    muladd(a->d[1], b->d[7]);
    muladd(a->d[2], b->d[6]);
    muladd(a->d[3], b->d[5]);
    muladd(a->d[4], b->d[4]);
    muladd(a->d[5], b->d[3]);
    muladd(a->d[6], b->d[2]);
    muladd(a->d[7], b->d[1]);
    extract(l[8]);
    muladd(a->d[2], b->d[7]);
    muladd(a->d[3], b->d[6]);
    muladd(a->d[4], b->d[5]);
    muladd(a->d[5], b->d[4]);
    muladd(a->d[6], b->d[3]);
    muladd(a->d[7], b->d[2]);
    extract(l[9]);
    muladd(a->d[3], b->d[7]);
    muladd(a->d[4], b->d[6]);
    muladd(a->d[5], b->d[5]);
    muladd(a->d[6], b->d[4]);
    muladd(a->d[7], b->d[3]);
    extract(l[10]);
    muladd(a->d[4], b->d[7]);
    muladd(a->d[5], b->d[6]);
    muladd(a->d[6], b->d[5]);
    muladd(a->d[7], b->d[4]);
    extract(l[11]);
    muladd(a->d[5], b->d[7]);
    muladd(a->d[6], b->d[6]);
    muladd(a->d[7], b->d[5]);
    extract(l[12]);
    muladd(a->d[6], b->d[7]);
    muladd(a->d[7], b->d[6]);
    extract(l[13]);
    muladd_fast(a->d[7], b->d[7]);
    extract_fast(l[14]);
    VERIFY_CHECK(c1 == 0);
    l[15] = c0;
}

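/* Squaring uses the same column scheme as secp256k1_scalar_mul_512, but since
 * a[i]*a[j] and a[j]*a[i] coincide, each off-diagonal product is fed once to
 * muladd2 (which doubles it) and only the diagonal squares go through plain
 * muladd, roughly halving the number of 32x32 multiplications. */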
static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) {
    /* 96 bit accumulator. */
    uint32_t c0 = 0, c1 = 0, c2 = 0;

    /* l[0..15] = a[0..7]^2. */
    muladd_fast(a->d[0], a->d[0]);
    extract_fast(l[0]);
    muladd2(a->d[0], a->d[1]);
    extract(l[1]);
    muladd2(a->d[0], a->d[2]);
    muladd(a->d[1], a->d[1]);
    extract(l[2]);
    muladd2(a->d[0], a->d[3]);
    muladd2(a->d[1], a->d[2]);
    extract(l[3]);
    muladd2(a->d[0], a->d[4]);
    muladd2(a->d[1], a->d[3]);
    muladd(a->d[2], a->d[2]);
    extract(l[4]);
    muladd2(a->d[0], a->d[5]);
    muladd2(a->d[1], a->d[4]);
    muladd2(a->d[2], a->d[3]);
    extract(l[5]);
    muladd2(a->d[0], a->d[6]);
    muladd2(a->d[1], a->d[5]);
    muladd2(a->d[2], a->d[4]);
    muladd(a->d[3], a->d[3]);
    extract(l[6]);
    muladd2(a->d[0], a->d[7]);
    muladd2(a->d[1], a->d[6]);
    muladd2(a->d[2], a->d[5]);
    muladd2(a->d[3], a->d[4]);
    extract(l[7]);
    muladd2(a->d[1], a->d[7]);
    muladd2(a->d[2], a->d[6]);
    muladd2(a->d[3], a->d[5]);
    muladd(a->d[4], a->d[4]);
    extract(l[8]);
    muladd2(a->d[2], a->d[7]);
    muladd2(a->d[3], a->d[6]);
    muladd2(a->d[4], a->d[5]);
    extract(l[9]);
    muladd2(a->d[3], a->d[7]);
    muladd2(a->d[4], a->d[6]);
    muladd(a->d[5], a->d[5]);
    extract(l[10]);
    muladd2(a->d[4], a->d[7]);
    muladd2(a->d[5], a->d[6]);
    extract(l[11]);
    muladd2(a->d[5], a->d[7]);
    muladd(a->d[6], a->d[6]);
    extract(l[12]);
    muladd2(a->d[6], a->d[7]);
    extract(l[13]);
    muladd_fast(a->d[7], a->d[7]);
    extract_fast(l[14]);
    VERIFY_CHECK(c1 == 0);
    l[15] = c0;
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint32_t l[16];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
    r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
    r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
    r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
    r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
    r->d[7] = (r->d[7] >> n);
    return ret;
}

static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint32_t l[16];
    secp256k1_scalar_sqr_512(l, a);
    secp256k1_scalar_reduce_512(r, l);
}

#ifdef USE_ENDOMORPHISM
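/* Splits a into r1 and r2 such that r1 + r2 * 2^128 = a: r1 receives the low
 * 128 bits and r2 the high 128 bits. The endomorphism-based multiplication
 * code uses this to decompose scalars. */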
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    r1->d[0] = a->d[0];
    r1->d[1] = a->d[1];
    r1->d[2] = a->d[2];
    r1->d[3] = a->d[3];
    r1->d[4] = 0;
    r1->d[5] = 0;
    r1->d[6] = 0;
    r1->d[7] = 0;
    r2->d[0] = a->d[4];
    r2->d[1] = a->d[5];
    r2->d[2] = a->d[6];
    r2->d[3] = a->d[7];
    r2->d[4] = 0;
    r2->d[5] = 0;
    r2->d[6] = 0;
    r2->d[7] = 0;
}
#endif

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}

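/* Computes round((a * b) / 2^shift): the limb selections below implement the
 * right shift of the 512-bit product, and the final cadd_bit adds bit
 * (shift - 1) of the product, rounding the result to the nearest integer. */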
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint32_t l[16];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 5;
    shiftlow = shift & 0x1F;
    shifthigh = 32 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
}

#endif