github.com/aergoio/aergo@v1.3.1/libtool/src/gmp-6.1.2/tests/mpn/t-fat.c (about)

     1  /* Test fat binary setups.
     2  
     3  Copyright 2003, 2012 Free Software Foundation, Inc.
     4  
     5  This file is part of the GNU MP Library test suite.
     6  
     7  The GNU MP Library test suite is free software; you can redistribute it
     8  and/or modify it under the terms of the GNU General Public License as
     9  published by the Free Software Foundation; either version 3 of the License,
    10  or (at your option) any later version.
    11  
    12  The GNU MP Library test suite is distributed in the hope that it will be
    13  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
    14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
    15  Public License for more details.
    16  
    17  You should have received a copy of the GNU General Public License along with
    18  the GNU MP Library test suite.  If not, see https://www.gnu.org/licenses/.  */
    19  
    20  #include <stdio.h>
    21  #include <stdlib.h>
    22  #include <string.h>
    23  
    24  #include "gmp.h"
    25  #include "gmp-impl.h"
    26  #include "longlong.h"
    27  #include "tests.h"
    28  
    29  
    30  /* In this program we're aiming to pick up certain subtle problems that
    31     might creep into a fat binary.
    32  
    33     1. We want to ensure the application entry point routines like
    34        __gmpn_add_n dispatch to the correct field of __gmpn_cpuvec.
    35  
    36        Note that these routines are not exercised as a side effect of other
    37        tests (eg. the mpz routines).  Internally the fields of __gmpn_cpuvec
    38        are used directly, so we need to write test code explicitly calling
    39        the mpn functions, like an application will have.
    40  
    41     2. We want to ensure the initial __gmpn_cpuvec data has the initializer
    42        function pointers in the correct fields, and that those initializer
    43        functions dispatch to their correct corresponding field once
    44        initialization has been done.
    45  
    46        Only one of the initializer routines executes in a normal program,
    47        since that routine sets all the pointers to actual mpn functions.  We
    48        forcibly reset __gmpn_cpuvec so we can run each.
    49  
    50     In both cases for the above, the data put through the functions is
    51     nothing special, just enough to verify that for instance an add_n is
    52     really doing an add_n and has not for instance mistakenly gone to sub_n
    53     or something.
    54  
    55     The loop around each test will exercise the initializer routine on the
    56     first iteration, and the dispatcher routine on the second.
    57  
    58     If the dispatcher and/or initializer routines are generated mechanically
    59     via macros (eg. mpn/x86/fat/fat_entry.asm) then there shouldn't be too
    60     much risk of them going wrong, provided the structure layout is correctly
    61     expressed.  But if they're in C then it's good to guard against typos in
    62     what is rather repetitive code.  The initializer data for __gmpn_cpuvec
    63     in fat.c is always done by hand and is likewise a bit repetitive.  */
    64  
    65  
/* dummies when not a fat binary */
#if ! WANT_FAT_BINARY
/* Minimal stand-ins so this test still compiles and links when the library
   was built without fat-binary support: an empty cpuvec struct and a no-op
   threshold iterator.  The memcpy resets below then operate on a harmless
   one-int object.  */
struct cpuvec_t {
  int  dummy;
};
struct cpuvec_t __gmpn_cpuvec;
#define ITERATE_FAT_THRESHOLDS()  do { } while (0)
#endif

/* saved from program startup */
/* Snapshot of the virgin __gmpn_cpuvec (taken in main before any mpn call),
   i.e. the table still holding the initializer function pointers.  Copying
   it back over __gmpn_cpuvec forces re-initialization on the next call.  */
struct cpuvec_t  initial_cpuvec;
    77  
/* Exercise each application-visible mpn entry point twice.  Before each
   pair of calls __gmpn_cpuvec is reset to its startup state, so iteration
   i==0 goes through the initializer routine and iteration i==1 through the
   now-filled-in dispatcher.  The operand data is trivial; each ASSERT only
   needs to prove the call reached the right function (eg. an add really
   added, rather than landing in sub_n).  */
void
check_functions (void)
{
  mp_limb_t  wp[2], xp[2], yp[2], r;
  int  i;

  /* mpn_add_n: 123 + 456 = 579 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      yp[0] = 456;
      mpn_add_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 579);
    }

  /* mpn_addmul_1: 456 + 123*2 = 702, no carry out */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_addmul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 702);
      ASSERT_ALWAYS (r == 0);
    }

#if HAVE_NATIVE_mpn_copyd
  /* mpn_copyd: copy one limb to a higher (overlapping-style) destination */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyd (xp+1, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[1] == 123);
    }
#endif

#if HAVE_NATIVE_mpn_copyi
  /* mpn_copyi: copy one limb to a lower destination */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyi (xp, xp+1, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[0] == 456);
    }
#endif

  /* mpn_divexact_1: 1605 / 5 = 321 (exact) */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1605;
      mpn_divexact_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(5));
      ASSERT_ALWAYS (wp[0] == 321);
    }

  /* mpn_divexact_by3c: 1296 / 3 = 432, zero carry in and out */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1296;
      r = mpn_divexact_by3c (wp, xp, (mp_size_t) 1, CNST_LIMB(0));
      ASSERT_ALWAYS (wp[0] == 432);
      ASSERT_ALWAYS (r == 0);
    }

  /* mpn_divrem_1 with qxn=1 fraction limb: 287 / 7 = 41 rem 0, so the
     integer quotient lands in wp[1] and the fraction limb wp[0] is 0 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 287;
      r = mpn_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1, CNST_LIMB(7));
      ASSERT_ALWAYS (wp[1] == 41);
      ASSERT_ALWAYS (wp[0] == 0);
      ASSERT_ALWAYS (r == 0);
    }

  /* mpn_gcd_1: gcd(12, 9) = 3 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 12;
      r = mpn_gcd_1 (xp, (mp_size_t) 1, CNST_LIMB(9));
      ASSERT_ALWAYS (r == 3);
    }

  /* mpn_lshift by 1 bit: 0x1001 << 1 = 0x2002 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x1001;
      mpn_lshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x2002);
    }

  /* mpn_mod_1: 14 mod 4 = 2 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mod_1 (xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (r == 2);
    }

#if (GMP_NUMB_BITS % 4) == 0
  /* mpn_mod_34lsub1: result is only congruent to the input mod 2^(3b/4)-1,
     so compare residues rather than exact values */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      int  bits = (GMP_NUMB_BITS / 4) * 3;
      mp_limb_t  mod = (CNST_LIMB(1) << bits) - 1;
      mp_limb_t  want = GMP_NUMB_MAX % mod;
      xp[0] = GMP_NUMB_MAX;
      r = mpn_mod_34lsub1 (xp, (mp_size_t) 1);
      ASSERT_ALWAYS (r % mod == want);
    }
#endif

  /* mpn_modexact_1c_odd is in the cpuvec but has no test here yet */
  /*   DECL_modexact_1c_odd ((*modexact_1c_odd)); */

  /* mpn_mul_1: 14 * 4 = 56, no carry out */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (wp[0] == 56);
      ASSERT_ALWAYS (r == 0);
    }

  /* mpn_mul_basecase: 5 * 7 = 35, high limb zero */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      yp[0] = 7;
      mpn_mul_basecase (wp, xp, (mp_size_t) 1, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 35);
      ASSERT_ALWAYS (wp[1] == 0);
    }

  /* mpn_mullo_basecase: low limb of 5 * 7 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      yp[0] = 7;
      mpn_mullo_basecase (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 35);
    }

#if HAVE_NATIVE_mpn_preinv_divrem_1 && GMP_NAIL_BITS == 0
  /* mpn_preinv_divrem_1: 0x101 divided by 2^(BITS-1) with one fraction
     limb gives fraction 0x202, integer part 0, remainder 0 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x101;
      r = mpn_preinv_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1,
                               GMP_LIMB_HIGHBIT,
                               refmpn_invert_limb (GMP_LIMB_HIGHBIT), 0);
      ASSERT_ALWAYS (wp[0] == 0x202);
      ASSERT_ALWAYS (wp[1] == 0);
      ASSERT_ALWAYS (r == 0);
    }
#endif

#if GMP_NAIL_BITS == 0
  /* mpn_preinv_mod_1: (2^(BITS-1) + 123) mod 2^(BITS-1) = 123 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = GMP_LIMB_HIGHBIT+123;
      r = mpn_preinv_mod_1 (xp, (mp_size_t) 1, GMP_LIMB_HIGHBIT,
                            refmpn_invert_limb (GMP_LIMB_HIGHBIT));
      ASSERT_ALWAYS (r == 123);
    }
#endif

  /* mpn_rshift by 1 bit: 0x8008 >> 1 = 0x4004 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x8008;
      mpn_rshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x4004);
    }

  /* mpn_sqr_basecase: 5^2 = 25, high limb zero */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      mpn_sqr_basecase (wp, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 25);
      ASSERT_ALWAYS (wp[1] == 0);
    }

  /* mpn_sub_n: 999 - 666 = 333 */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 999;
      yp[0] = 666;
      mpn_sub_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 333);
    }

  /* mpn_submul_1: 456 - 123*2 = 210, no borrow out */
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_submul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 210);
      ASSERT_ALWAYS (r == 0);
    }
}
   280  
/* Expect the first use of each fat threshold to invoke the necessary
   initialization.  */
void
check_thresholds (void)
{
  /* For each fat-controlled threshold NAME backed by __gmpn_cpuvec.FIELD
     (the pairs are supplied by ITERATE_FAT_THRESHOLDS): reset the cpuvec
     to its startup state, then read the threshold.  That very first read
     must trigger initialization, produce a nonzero value, and agree with
     the cpuvec field it is documented to come from.  */
#define ITERATE(name,field)                                             \
  do {                                                                  \
    __gmpn_cpuvec_initialized = 0;					\
    memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));   \
    ASSERT_ALWAYS (name != 0);                                          \
    ASSERT_ALWAYS (name == __gmpn_cpuvec.field);                        \
    ASSERT_ALWAYS (__gmpn_cpuvec_initialized);                          \
  } while (0)

  ITERATE_FAT_THRESHOLDS ();
}
   297  
   298  
   299  int
   300  main (void)
   301  {
   302    memcpy (&initial_cpuvec, &__gmpn_cpuvec, sizeof (__gmpn_cpuvec));
   303  
   304    tests_start ();
   305  
   306    check_functions ();
   307    check_thresholds ();
   308  
   309    tests_end ();
   310    exit (0);
   311  }