github.com/aergoio/aergo@v1.3.1/libtool/src/gmp-6.1.2/mpn/generic/invertappr.c

/* mpn_invertappr and helper functions.  Compute I such that
   floor((B^{2n}-1)/U) - 1 <= I + B^n <= floor((B^{2n}-1)/U).

   Contributed to the GNU project by Marco Bodrato.

   The algorithm used here was inspired by ApproximateReciprocal from "Modern
   Computer Arithmetic", by Richard P. Brent and Paul Zimmermann.  Special
   thanks to Paul Zimmermann for his very valuable suggestions on all the
   theoretical aspects during the work on this code.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE.

Copyright (C) 2007, 2009, 2010, 2012, 2015 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* FIXME: The iterative version splits the operand in two slightly unbalanced
   parts; the use of log_2 (or counting the bits) underestimates the maximum
   number of iterations.  */

#if TUNE_PROGRAM_BUILD
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)))
#define MAYBE_dcpi1_divappr   1
#else
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)) - LOG2C (INV_NEWTON_THRESHOLD))
#define MAYBE_dcpi1_divappr \
  (INV_NEWTON_THRESHOLD < DC_DIVAPPR_Q_THRESHOLD)
#if (INV_NEWTON_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD) && \
    (INV_APPR_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD)
#undef  INV_MULMOD_BNM1_THRESHOLD
#define INV_MULMOD_BNM1_THRESHOLD 0 /* always when Newton */
#endif
#endif
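
/* (Added remark, not in the GMP original.)  NPOWS is the capacity of the
   sizes[] array in mpn_ni_invertappr below: each Newton step roughly halves
   the operand size, so at most about
   log2 (max n) - log2 (INV_NEWTON_THRESHOLD) sizes are ever stored.  The
   TUNE_PROGRAM_BUILD variant keeps all log2 (max n) slots, since the
   threshold is not fixed while tuning.  */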

/* All three functions mpn{,_bc,_ni}_invertappr (ip, dp, n, scratch) take
   the strictly normalised value {dp,n} (i.e., most significant bit must be
   set) as an input, and compute {ip,n}: the approximate reciprocal of {dp,n}.

   Let e = mpn*_invertappr (ip, dp, n, scratch) be the returned value; the
   following conditions are satisfied by the output:
     0 <= e <= 1;
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1+e) .
   I.e. e=0 means that the result {ip,n} equals the one given by mpn_invert;
        e=1 means that the result _may_ be one less than expected.

   The _bc version returns e=1 most of the time.
   The _ni version should return e=0 most of the time; only about 1% of
   possible random input should give e=1.

   When the strict result is needed, i.e., e=0 in the relation above:
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1) ;
   the function mpn_invert (ip, dp, n, scratch) should be used instead.  */

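/* Illustrative sketch only, NOT part of GMP: one way a caller could exercise
   the contract above.  Allocation is elided; 'ip' needs n limbs, 'scratch'
   needs mpn_invertappr_itch (n) limbs, 'prod' needs 2*n+1 limbs.  The
   function name and parameter layout are hypothetical.  */
#if 0
static mp_limb_t
example_check_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n,
                          mp_ptr scratch, mp_ptr prod)
{
  mp_limb_t e = mpn_invertappr (ip, dp, n, scratch);

  /* Form {prod,2n+1} = {dp,n} * (B^n + {ip,n}).  */
  mpn_mul_n (prod, dp, ip, n);
  prod[2 * n] = mpn_add_n (prod + n, prod + n, dp, n);

  /* The contract guarantees {dp,n}*(B^n+{ip,n}) < B^{2n}, so nothing may
     spill into limb position 2n.  */
  ASSERT (prod[2 * n] == 0);
  return e;
}
#endif
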
/* Maximum scratch needed by this branch (at xp): 2*n */
static mp_limb_t
mpn_bc_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr xp)
{
  ASSERT (n > 0);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, xp, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, xp, mpn_invertappr_itch(n)));

  /* Compute a base value of n limbs. */
  if (n == 1)
    invert_limb (*ip, *dp);
  else {
    mp_size_t i;

    /* n > 1 here */
    i = n;
    do
      xp[--i] = GMP_NUMB_MAX;
    while (i);
    mpn_com (xp + n, dp, n);

    /* Now xp contains B^2n - {dp,n}*B^n - 1 */
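    /* (Added worked example, illustrative only.)  In base B = 10 with n = 1
       and {dp,n} = 5: xp = 100 - 50 - 1 = 49, and floor (49/5) = 9, so
       B^n + {ip,n} = 19, which matches floor ((B^{2n}-1)/{dp,n}) =
       floor (99/5) = 19, i.e. e = 0 in the relation above.  */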

    /* FIXME: if mpn_*pi1_divappr_q handles n==2, use it! */
    if (n == 2) {
      mpn_divrem_2 (ip, 0, xp, 4, dp);
    } else {
      gmp_pi1_t inv;
      invert_pi1 (inv, dp[n-1], dp[n-2]);
      if (! MAYBE_dcpi1_divappr
          || BELOW_THRESHOLD (n, DC_DIVAPPR_Q_THRESHOLD))
        mpn_sbpi1_divappr_q (ip, xp, 2 * n, dp, n, inv.inv32);
      else
        mpn_dcpi1_divappr_q (ip, xp, 2 * n, dp, n, &inv);
      MPN_DECR_U(ip, n, CNST_LIMB (1));
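      /* (Added remark.)  The divappr quotient may exceed the true quotient
         by a small amount, never fall below it; decrementing by 1 makes
         {ip,n} a safe lower bound at the cost of possibly being one too
         small, which is why this path returns e=1.  */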
      return 1;
    }
  }
  return 0;
}

/* mpn_ni_invertappr: computes the approximate reciprocal using Newton's
   iterations (at least one).

   Inspired by Algorithm "ApproximateReciprocal", published in "Modern Computer
   Arithmetic" by Richard P. Brent and Paul Zimmermann, algorithm 3.5, page 121
   in version 0.4 of the book.

   Some adaptations were introduced, to allow the product mod B^m-1 and to
   return the value e.

   We introduced a correction in such a way that "the value of
   B^{n+h}-T computed at step 8 cannot exceed B^n-1" (the book reads
   "2B^n-1").

   Maximum scratch needed by this branch <= 2*n, but we have to fit 3*rn
   in the scratch, i.e. 3*rn <= 2*n: we require n>4.

   We use a wrapped product modulo B^m-1.  NOTE: is there any normalisation
   problem for the [0] class?  There shouldn't be: we compute
   2*|A*X_h - B^{n+h}| < B^m-1.  We may get [0] if and only if we get
   A*X_h = B^{n+h}.  This can happen only if A=B^n/2, but this implies
   X_h = B^h*2-1, i.e., A*X_h = B^{n+h} - A, so we get into the "negative"
   branch, where X_h is not incremented (because A < B^n).

   FIXME: the scratch for mulmod_bnm1 does not currently fit in the scratch;
   it is allocated apart.
 */

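/* Orientation sketch (added; not in the GMP original): with D = 0.{dp,n} and
   X = 1.{ip,rn} the current approximation of 1/D, one Newton step computes,
   in fixed point,

     X' = X + X * (1 - D * X)

   roughly doubling the number of correct limbs.  The code below implements
   this with either a full or a wrapped (mod B^mn-1) product for D*X, plus
   the corrections described above, keeping the final error within the
   bounds required for the return value e.  */
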
mp_limb_t
mpn_ni_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
{
  mp_limb_t cy;
  mp_size_t rn, mn;
  mp_size_t sizes[NPOWS], *sizp;
  mp_ptr tp;
  TMP_DECL;
#define xp scratch

  ASSERT (n > 4);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));

  /* Compute the computation precisions from highest to lowest, leaving the
     base case size in 'rn'.  */
  sizp = sizes;
  rn = n;
  do {
    *sizp = rn;
    rn = (rn >> 1) + 1;
    ++sizp;
  } while (ABOVE_THRESHOLD (rn, INV_NEWTON_THRESHOLD));
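
  /* (Added worked example, illustrative only; thresholds are tuned per
     platform.)  With n = 1000 and INV_NEWTON_THRESHOLD = 170, the loop
     stores sizes = {1000, 501, 251} and leaves rn = 126 as the basecase
     size.  */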

  /* We search the inverse of 0.{dp,n}; we compute it as 1.{ip,n}.  */
  dp += n;
  ip += n;

  /* Compute a base value of rn limbs. */
  mpn_bc_invertappr (ip - rn, dp - rn, rn, scratch);

  TMP_MARK;

  if (ABOVE_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD))
    {
      mn = mpn_mulmod_bnm1_next_size (n + 1);
      tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (mn, n, (n >> 1) + 1));
    }
  /* Use Newton's iterations to get the desired precision.  */

  while (1) {
    n = *--sizp;
    /*
      v    n  v
      +----+--+
      ^ rn ^
    */

    /* Compute i_j * d.  */
    if (BELOW_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD)
        || ((mn = mpn_mulmod_bnm1_next_size (n + 1)) > (n + rn))) {
      /* FIXME: we only need {xp,n+1}.  */
      mpn_mul (xp, dp - n, n, ip - rn, rn);
      mpn_add_n (xp + rn, xp + rn, dp - n, n - rn + 1);
      cy = CNST_LIMB(1); /* Remember we truncated, mod B^(n+1).  */
      /* We computed (truncated) {xp,n+1} <- 1.{ip,rn} * 0.{dp,n} */
    } else { /* Use B^mn-1 wraparound */
      mpn_mulmod_bnm1 (xp, mn, dp - n, n, ip - rn, rn, tp);
      /* We computed {xp,mn} <- {ip,rn} * {dp,n} mod (B^mn-1) */
      /* We know that 2*|ip*dp + dp*B^rn - B^{rn+n}| < B^mn-1 */
      /* Add dp*B^rn mod (B^mn-1) */
      ASSERT (n >= mn - rn);
      cy = mpn_add_n (xp + rn, xp + rn, dp - n, mn - rn);
      cy = mpn_add_nc (xp, xp, dp - (n - (mn - rn)), n - (mn - rn), cy);
      /* Subtract B^{rn+n}, maybe only compensate the carry.  */
      xp[mn] = CNST_LIMB (1); /* set a limit for DECR_U */
      MPN_DECR_U (xp + rn + n - mn, 2 * mn + 1 - rn - n, CNST_LIMB (1) - cy);
      MPN_DECR_U (xp, mn, CNST_LIMB (1) - xp[mn]); /* if DECR_U eroded xp[mn] */
      cy = CNST_LIMB(0); /* Remember we are working mod B^mn-1.  */
    }

    if (xp[n] < CNST_LIMB (2)) { /* "positive" residue class */
      cy = xp[n]; /* 0 <= cy <= 1 here. */
#if HAVE_NATIVE_mpn_sublsh1_n
      if (cy++) {
        if (mpn_cmp (xp, dp - n, n) > 0) {
          mp_limb_t chk;
          chk = mpn_sublsh1_n (xp, xp, dp - n, n);
          ASSERT (chk == xp[n]);
          ++ cy;
        } else
          ASSERT_CARRY (mpn_sub_n (xp, xp, dp - n, n));
      }
#else /* no mpn_sublsh1_n */
      if (cy++ && !mpn_sub_n (xp, xp, dp - n, n)) {
        ASSERT_CARRY (mpn_sub_n (xp, xp, dp - n, n));
        ++cy;
      }
#endif
      /* 1 <= cy <= 3 here. */
#if HAVE_NATIVE_mpn_rsblsh1_n
      if (mpn_cmp (xp, dp - n, n) > 0) {
        ASSERT_NOCARRY (mpn_rsblsh1_n (xp + n, xp, dp - n, n));
        ++cy;
      } else
        ASSERT_NOCARRY (mpn_sub_nc (xp + 2 * n - rn, dp - rn, xp + n - rn, rn, mpn_cmp (xp, dp - n, n - rn) > 0));
#else /* no mpn_rsblsh1_n */
      if (mpn_cmp (xp, dp - n, n) > 0) {
        ASSERT_NOCARRY (mpn_sub_n (xp, xp, dp - n, n));
        ++cy;
      }
      ASSERT_NOCARRY (mpn_sub_nc (xp + 2 * n - rn, dp - rn, xp + n - rn, rn, mpn_cmp (xp, dp - n, n - rn) > 0));
#endif
      MPN_DECR_U(ip - rn, rn, cy); /* 1 <= cy <= 4 here. */
    } else { /* "negative" residue class */
      ASSERT (xp[n] >= GMP_NUMB_MAX - CNST_LIMB(1));
      MPN_DECR_U(xp, n + 1, cy);
      if (xp[n] != GMP_NUMB_MAX) {
        MPN_INCR_U(ip - rn, rn, CNST_LIMB (1));
        ASSERT_CARRY (mpn_add_n (xp, xp, dp - n, n));
      }
      mpn_com (xp + 2 * n - rn, xp + n - rn, rn);
    }

    /* Compute x_j * u_j.  FIXME: we need {xp+rn,rn}; mulhi?  */
    mpn_mul_n (xp, xp + 2 * n - rn, ip - rn, rn);
    cy = mpn_add_n (xp + rn, xp + rn, xp + 2 * n - rn, 2 * rn - n);
    cy = mpn_add_nc (ip - n, xp + 3 * rn - n, xp + n + rn, n - rn, cy);
    MPN_INCR_U (ip - rn, rn, cy);
    if (sizp == sizes) { /* Get out of the cycle */
      /* Check for possible carry propagation from below. */
      cy = xp[3 * rn - n - 1] > GMP_NUMB_MAX - CNST_LIMB (7); /* Be conservative. */
      /*    cy = mpn_add_1 (xp + rn, xp + rn, 2*rn - n, 4); */
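      /* (Added remark.)  xp[3*rn-n-1] is the most significant of the limbs
         truncated away above; if it is within a few units of GMP_NUMB_MAX,
         a carry from the dropped low part could have propagated into the
         result, so we conservatively report e=1 instead of re-checking.  */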
      break;
    }
    rn = n;
  }
  TMP_FREE;

  return cy;
#undef xp
}

mp_limb_t
mpn_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
{
  ASSERT (n > 0);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));

  if (BELOW_THRESHOLD (n, INV_NEWTON_THRESHOLD))
    return mpn_bc_invertappr (ip, dp, n, scratch);
  else
    return mpn_ni_invertappr (ip, dp, n, scratch);
}
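
/* (Added remark, not in the GMP original.)  The wrapper above dispatches on
   INV_NEWTON_THRESHOLD: below it, the quadratic divappr-based basecase is
   used; at or above it, the Newton iteration, which requires n > 4 and
   performs at least one step.  Both variants work within the scratch space
   the caller sizes with mpn_invertappr_itch (n), except for the mulmod_bnm1
   scratch that mpn_ni_invertappr TMP-allocates separately (see the FIXME
   above mpn_ni_invertappr).  */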