github.com/consensys/gnark-crypto@v0.14.0/ecc/bn254/multiexp_affine.go (about)

     1  // Copyright 2020 Consensys Software Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Code generated by consensys/gnark-crypto DO NOT EDIT
    16  
    17  package bn254
    18  
    19  import (
    20  	"github.com/consensys/gnark-crypto/ecc/bn254/fp"
    21  	"github.com/consensys/gnark-crypto/ecc/bn254/internal/fptower"
    22  )
    23  
// batchOpG1Affine is an entry of the conflict queue used by
// processChunkG1BatchAffine: a point waiting to be added to bucket bucketID
// once that bucket is no longer referenced by the in-flight batch. The point
// is stored by value (already negated by the caller when the digit encodes a
// subtraction).
type batchOpG1Affine struct {
	bucketID uint16
	point    G1Affine
}
    28  
// processChunkG1BatchAffine process a chunk of the scalars during the msm
// using affine coordinates for the buckets. To amortize the cost of the inverse in the affine addition
// we use a batch affine addition.
//
// this is derived from a PR by 0x0ece : https://github.com/ConsenSys/gnark-crypto/pull/249
// See Section 5.3: ia.cr/2022/1396
//
// points and digits are parallel: digits[i] is the signed digit of points[i]
// for this window (0 means the point is skipped). The reduced partial sum of
// this chunk is sent on chRes. sem, when non-nil, is a counting semaphore
// bounding the number of chunk workers running concurrently. chunk and c are
// not used in this body: the bucket, batch and queue sizes are baked into the
// type parameters by the caller.
func processChunkG1BatchAffine[BJE ibg1JacExtended, B ibG1Affine, BS bitSet, TP pG1Affine, TPP ppG1Affine, TQ qOpsG1Affine, TC cG1Affine](
	chunk uint64,
	chRes chan<- g1JacExtended,
	c uint64,
	points []G1Affine,
	digits []uint16,
	sem chan struct{}) {

	if sem != nil {
		// if we are limited, wait for a token in the semaphore
		<-sem
	}

	// the batch affine addition needs independent points; in other words, for a window of batchSize
	// we want to hit independent bucketIDs when processing the digit. if there is a conflict (we're trying
	// to add 2 different points to the same bucket), then we push the conflicted point to a queue.
	// each time the batch is full, we execute it, and tentatively put the points (if not conflict)
	// from the top of the queue into the next batch.
	// if the queue is full, we "flush it"; we sequentially add the points to the buckets in
	// g1JacExtended coordinates.
	// The reasoning behind this is the following; batchSize is chosen such as, for a uniformly random
	// input, the number of conflicts is going to be low, and the element added to the queue should be immediately
	// processed in the next batch. If it's not the case, then our inputs are not random; and we fallback to
	// non-batch-affine version.

	// note that we have 2 sets of buckets
	// 1 in G1Affine used with the batch affine additions
	// 1 in g1JacExtended used in case the queue of conflicting points
	var buckets B // in G1Affine coordinates, infinity point is represented as (0,0), no need to init
	var bucketsJE BJE
	for i := 0; i < len(buckets); i++ {
		bucketsJE[i].setInfinity()
	}

	// setup for the batch affine;
	var (
		bucketIds BS  // bitSet to signify presence of a bucket in current batch
		cptAdd    int // count the number of bucket + point added to current batch
		R         TPP // bucket references
		P         TP  // points to be added to R (buckets); it is beneficial to store them on the stack (ie copy)
		queue     TQ  // queue of points that conflict the current batch
		qID       int // current position in queue
	)

	batchSize := len(P)

	isFull := func() bool { return cptAdd == batchSize }

	// executeAndReset performs the pending batch addition (R[i] += P[i] for
	// the first cptAdd entries), then clears the bucket bitset and resets the
	// batch counter so a new batch can be accumulated.
	executeAndReset := func() {
		batchAddG1Affine[TP, TPP, TC](&R, &P, cptAdd)
		var tmp BS
		bucketIds = tmp
		cptAdd = 0
	}

	addFromQueue := func(op batchOpG1Affine) {
		// @precondition: must ensures bucket is not "used" in current batch
		// note that there is a bit of duplicate logic between add and addFromQueue
		// the reason is that as of Go 1.19.3, if we pass a pointer to the queue item (see add signature)
		// the compiler will put the queue on the heap.
		BK := &buckets[op.bucketID]

		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			BK.Set(&op.point)
			return
		}
		if BK.X.Equal(&op.point.X) {
			if BK.Y.Equal(&op.point.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets
				bucketsJE[op.bucketID].addMixed(&op.point)
				return
			}
			// P + (-P): the bucket cancels out to infinity.
			BK.setInfinity()
			return
		}

		// general case: schedule BK += op.point in the current batch.
		bucketIds[op.bucketID] = true
		R[cptAdd] = BK
		P[cptAdd] = op.point
		cptAdd++
	}

	add := func(bucketID uint16, PP *G1Affine, isAdd bool) {
		// @precondition: ensures bucket is not "used" in current batch
		BK := &buckets[bucketID]
		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			if isAdd {
				BK.Set(PP)
			} else {
				BK.Neg(PP)
			}
			return
		}
		if BK.X.Equal(&PP.X) {
			if BK.Y.Equal(&PP.Y) {
				// P + P: doubling, which should be quite rare --
				if isAdd {
					bucketsJE[bucketID].addMixed(PP)
				} else {
					// P - P cancels out to infinity.
					BK.setInfinity()
				}
				return
			}
			// here BK = -PP (same X, opposite Y)
			if isAdd {
				// P + (-P) cancels out to infinity.
				BK.setInfinity()
			} else {
				// (-P) - P: doubling of the negation; use the fallback buckets.
				bucketsJE[bucketID].subMixed(PP)
			}
			return
		}

		// general case: schedule BK += (+/-)PP in the current batch.
		bucketIds[bucketID] = true
		R[cptAdd] = BK
		if isAdd {
			P[cptAdd].Set(PP)
		} else {
			P[cptAdd].Neg(PP)
		}
		cptAdd++
	}

	// flushQueue drains the conflict queue into the extended-Jacobian buckets
	// (the slow, always-safe path) and empties it.
	flushQueue := func() {
		for i := 0; i < qID; i++ {
			bucketsJE[queue[i].bucketID].addMixed(&queue[i].point)
		}
		qID = 0
	}

	// processTopQueue pops entries off the top of the queue into the (freshly
	// reset) batch, stopping at the first entry whose bucket is already used.
	processTopQueue := func() {
		for i := qID - 1; i >= 0; i-- {
			if bucketIds[queue[i].bucketID] {
				return
			}
			addFromQueue(queue[i])
			// len(queue) < batchSize so no need to check for full batch.
			qID--
		}
	}

	for i, digit := range digits {

		if digit == 0 || points[i].IsInfinity() {
			continue
		}

		// digit encoding: bit 0 cleared means "add", set means "subtract";
		// the high bits hold the bucket index (1-based for additions).
		bucketID := uint16((digit >> 1))
		isAdd := digit&1 == 0
		if isAdd {
			// add
			bucketID -= 1
		}

		if bucketIds[bucketID] {
			// the bucket is already written to by the in-flight batch;
			// put it in queue
			queue[qID].bucketID = bucketID
			if isAdd {
				queue[qID].point.Set(&points[i])
			} else {
				queue[qID].point.Neg(&points[i])
			}
			qID++

			// queue is full, flush it.
			// NOTE(review): flushes one slot early (qID == len(queue)-1,
			// not len(queue)); conservative but safe since flushQueue
			// resets qID to 0.
			if qID == len(queue)-1 {
				flushQueue()
			}
			continue
		}

		// we add the point to the batch.
		add(bucketID, &points[i], isAdd)
		if isFull() {
			executeAndReset()
			processTopQueue()
		}
	}

	// flush items in batch.
	executeAndReset()

	// empty the queue
	flushQueue()

	// reduce buckets into total
	// total =  bucket[0] + 2*bucket[1] + 3*bucket[2] ... + n*bucket[n-1]
	// both bucket sets contribute to bucket k: bucketsJE only holds the rare
	// doubling / flushed-conflict fallbacks.
	var runningSum, total g1JacExtended
	runningSum.setInfinity()
	total.setInfinity()
	for k := len(buckets) - 1; k >= 0; k-- {
		runningSum.addMixed(&buckets[k])
		if !bucketsJE[k].IsInfinity() {
			runningSum.add(&bucketsJE[k])
		}
		total.add(&runningSum)
	}

	if sem != nil {
		// release a token to the semaphore
		// before sending to chRes
		sem <- struct{}{}
	}

	chRes <- total

}
   243  
   244  // we declare the buckets as fixed-size array types
   245  // this allow us to allocate the buckets on the stack
   246  type bucketG1AffineC10 [512]G1Affine
   247  type bucketG1AffineC11 [1024]G1Affine
   248  type bucketG1AffineC12 [2048]G1Affine
   249  type bucketG1AffineC13 [4096]G1Affine
   250  type bucketG1AffineC14 [8192]G1Affine
   251  type bucketG1AffineC15 [16384]G1Affine
   252  type bucketG1AffineC16 [32768]G1Affine
   253  
   254  // buckets: array of G1Affine points of size 1 << (c-1)
   255  type ibG1Affine interface {
   256  	bucketG1AffineC10 |
   257  		bucketG1AffineC11 |
   258  		bucketG1AffineC12 |
   259  		bucketG1AffineC13 |
   260  		bucketG1AffineC14 |
   261  		bucketG1AffineC15 |
   262  		bucketG1AffineC16
   263  }
   264  
   265  // array of coordinates fp.Element
   266  type cG1Affine interface {
   267  	cG1AffineC10 |
   268  		cG1AffineC11 |
   269  		cG1AffineC12 |
   270  		cG1AffineC13 |
   271  		cG1AffineC14 |
   272  		cG1AffineC15 |
   273  		cG1AffineC16
   274  }
   275  
   276  // buckets: array of G1Affine points (for the batch addition)
   277  type pG1Affine interface {
   278  	pG1AffineC10 |
   279  		pG1AffineC11 |
   280  		pG1AffineC12 |
   281  		pG1AffineC13 |
   282  		pG1AffineC14 |
   283  		pG1AffineC15 |
   284  		pG1AffineC16
   285  }
   286  
   287  // buckets: array of *G1Affine points (for the batch addition)
   288  type ppG1Affine interface {
   289  	ppG1AffineC10 |
   290  		ppG1AffineC11 |
   291  		ppG1AffineC12 |
   292  		ppG1AffineC13 |
   293  		ppG1AffineC14 |
   294  		ppG1AffineC15 |
   295  		ppG1AffineC16
   296  }
   297  
   298  // buckets: array of G1Affine queue operations (for the batch addition)
   299  type qOpsG1Affine interface {
   300  	qG1AffineC10 |
   301  		qG1AffineC11 |
   302  		qG1AffineC12 |
   303  		qG1AffineC13 |
   304  		qG1AffineC14 |
   305  		qG1AffineC15 |
   306  		qG1AffineC16
   307  }
   308  
   309  // batch size 80 when c = 10
   310  type cG1AffineC10 [80]fp.Element
   311  type pG1AffineC10 [80]G1Affine
   312  type ppG1AffineC10 [80]*G1Affine
   313  type qG1AffineC10 [80]batchOpG1Affine
   314  
   315  // batch size 150 when c = 11
   316  type cG1AffineC11 [150]fp.Element
   317  type pG1AffineC11 [150]G1Affine
   318  type ppG1AffineC11 [150]*G1Affine
   319  type qG1AffineC11 [150]batchOpG1Affine
   320  
   321  // batch size 200 when c = 12
   322  type cG1AffineC12 [200]fp.Element
   323  type pG1AffineC12 [200]G1Affine
   324  type ppG1AffineC12 [200]*G1Affine
   325  type qG1AffineC12 [200]batchOpG1Affine
   326  
   327  // batch size 350 when c = 13
   328  type cG1AffineC13 [350]fp.Element
   329  type pG1AffineC13 [350]G1Affine
   330  type ppG1AffineC13 [350]*G1Affine
   331  type qG1AffineC13 [350]batchOpG1Affine
   332  
   333  // batch size 400 when c = 14
   334  type cG1AffineC14 [400]fp.Element
   335  type pG1AffineC14 [400]G1Affine
   336  type ppG1AffineC14 [400]*G1Affine
   337  type qG1AffineC14 [400]batchOpG1Affine
   338  
   339  // batch size 500 when c = 15
   340  type cG1AffineC15 [500]fp.Element
   341  type pG1AffineC15 [500]G1Affine
   342  type ppG1AffineC15 [500]*G1Affine
   343  type qG1AffineC15 [500]batchOpG1Affine
   344  
   345  // batch size 640 when c = 16
   346  type cG1AffineC16 [640]fp.Element
   347  type pG1AffineC16 [640]G1Affine
   348  type ppG1AffineC16 [640]*G1Affine
   349  type qG1AffineC16 [640]batchOpG1Affine
   350  
// batchOpG2Affine is an entry of the conflict queue used by
// processChunkG2BatchAffine: a point waiting to be added to bucket bucketID
// once that bucket is no longer referenced by the in-flight batch. The point
// is stored by value (already negated by the caller when the digit encodes a
// subtraction).
type batchOpG2Affine struct {
	bucketID uint16
	point    G2Affine
}
   355  
// processChunkG2BatchAffine process a chunk of the scalars during the msm
// using affine coordinates for the buckets. To amortize the cost of the inverse in the affine addition
// we use a batch affine addition.
//
// this is derived from a PR by 0x0ece : https://github.com/ConsenSys/gnark-crypto/pull/249
// See Section 5.3: ia.cr/2022/1396
//
// points and digits are parallel: digits[i] is the signed digit of points[i]
// for this window (0 means the point is skipped). The reduced partial sum of
// this chunk is sent on chRes. sem, when non-nil, is a counting semaphore
// bounding the number of chunk workers running concurrently. chunk and c are
// not used in this body: the bucket, batch and queue sizes are baked into the
// type parameters by the caller.
func processChunkG2BatchAffine[BJE ibg2JacExtended, B ibG2Affine, BS bitSet, TP pG2Affine, TPP ppG2Affine, TQ qOpsG2Affine, TC cG2Affine](
	chunk uint64,
	chRes chan<- g2JacExtended,
	c uint64,
	points []G2Affine,
	digits []uint16,
	sem chan struct{}) {

	if sem != nil {
		// if we are limited, wait for a token in the semaphore
		<-sem
	}

	// the batch affine addition needs independent points; in other words, for a window of batchSize
	// we want to hit independent bucketIDs when processing the digit. if there is a conflict (we're trying
	// to add 2 different points to the same bucket), then we push the conflicted point to a queue.
	// each time the batch is full, we execute it, and tentatively put the points (if not conflict)
	// from the top of the queue into the next batch.
	// if the queue is full, we "flush it"; we sequentially add the points to the buckets in
	// g2JacExtended coordinates.
	// The reasoning behind this is the following; batchSize is chosen such as, for a uniformly random
	// input, the number of conflicts is going to be low, and the element added to the queue should be immediately
	// processed in the next batch. If it's not the case, then our inputs are not random; and we fallback to
	// non-batch-affine version.

	// note that we have 2 sets of buckets
	// 1 in G2Affine used with the batch affine additions
	// 1 in g2JacExtended used in case the queue of conflicting points
	var buckets B // in G2Affine coordinates, infinity point is represented as (0,0), no need to init
	var bucketsJE BJE
	for i := 0; i < len(buckets); i++ {
		bucketsJE[i].setInfinity()
	}

	// setup for the batch affine;
	var (
		bucketIds BS  // bitSet to signify presence of a bucket in current batch
		cptAdd    int // count the number of bucket + point added to current batch
		R         TPP // bucket references
		P         TP  // points to be added to R (buckets); it is beneficial to store them on the stack (ie copy)
		queue     TQ  // queue of points that conflict the current batch
		qID       int // current position in queue
	)

	batchSize := len(P)

	isFull := func() bool { return cptAdd == batchSize }

	// executeAndReset performs the pending batch addition (R[i] += P[i] for
	// the first cptAdd entries), then clears the bucket bitset and resets the
	// batch counter so a new batch can be accumulated.
	executeAndReset := func() {
		batchAddG2Affine[TP, TPP, TC](&R, &P, cptAdd)
		var tmp BS
		bucketIds = tmp
		cptAdd = 0
	}

	addFromQueue := func(op batchOpG2Affine) {
		// @precondition: must ensures bucket is not "used" in current batch
		// note that there is a bit of duplicate logic between add and addFromQueue
		// the reason is that as of Go 1.19.3, if we pass a pointer to the queue item (see add signature)
		// the compiler will put the queue on the heap.
		BK := &buckets[op.bucketID]

		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			BK.Set(&op.point)
			return
		}
		if BK.X.Equal(&op.point.X) {
			if BK.Y.Equal(&op.point.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets
				bucketsJE[op.bucketID].addMixed(&op.point)
				return
			}
			// P + (-P): the bucket cancels out to infinity.
			BK.setInfinity()
			return
		}

		// general case: schedule BK += op.point in the current batch.
		bucketIds[op.bucketID] = true
		R[cptAdd] = BK
		P[cptAdd] = op.point
		cptAdd++
	}

	add := func(bucketID uint16, PP *G2Affine, isAdd bool) {
		// @precondition: ensures bucket is not "used" in current batch
		BK := &buckets[bucketID]
		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			if isAdd {
				BK.Set(PP)
			} else {
				BK.Neg(PP)
			}
			return
		}
		if BK.X.Equal(&PP.X) {
			if BK.Y.Equal(&PP.Y) {
				// P + P: doubling, which should be quite rare --
				if isAdd {
					bucketsJE[bucketID].addMixed(PP)
				} else {
					// P - P cancels out to infinity.
					BK.setInfinity()
				}
				return
			}
			// here BK = -PP (same X, opposite Y)
			if isAdd {
				// P + (-P) cancels out to infinity.
				BK.setInfinity()
			} else {
				// (-P) - P: doubling of the negation; use the fallback buckets.
				bucketsJE[bucketID].subMixed(PP)
			}
			return
		}

		// general case: schedule BK += (+/-)PP in the current batch.
		bucketIds[bucketID] = true
		R[cptAdd] = BK
		if isAdd {
			P[cptAdd].Set(PP)
		} else {
			P[cptAdd].Neg(PP)
		}
		cptAdd++
	}

	// flushQueue drains the conflict queue into the extended-Jacobian buckets
	// (the slow, always-safe path) and empties it.
	flushQueue := func() {
		for i := 0; i < qID; i++ {
			bucketsJE[queue[i].bucketID].addMixed(&queue[i].point)
		}
		qID = 0
	}

	// processTopQueue pops entries off the top of the queue into the (freshly
	// reset) batch, stopping at the first entry whose bucket is already used.
	processTopQueue := func() {
		for i := qID - 1; i >= 0; i-- {
			if bucketIds[queue[i].bucketID] {
				return
			}
			addFromQueue(queue[i])
			// len(queue) < batchSize so no need to check for full batch.
			qID--
		}
	}

	for i, digit := range digits {

		if digit == 0 || points[i].IsInfinity() {
			continue
		}

		// digit encoding: bit 0 cleared means "add", set means "subtract";
		// the high bits hold the bucket index (1-based for additions).
		bucketID := uint16((digit >> 1))
		isAdd := digit&1 == 0
		if isAdd {
			// add
			bucketID -= 1
		}

		if bucketIds[bucketID] {
			// the bucket is already written to by the in-flight batch;
			// put it in queue
			queue[qID].bucketID = bucketID
			if isAdd {
				queue[qID].point.Set(&points[i])
			} else {
				queue[qID].point.Neg(&points[i])
			}
			qID++

			// queue is full, flush it.
			// NOTE(review): flushes one slot early (qID == len(queue)-1,
			// not len(queue)); conservative but safe since flushQueue
			// resets qID to 0.
			if qID == len(queue)-1 {
				flushQueue()
			}
			continue
		}

		// we add the point to the batch.
		add(bucketID, &points[i], isAdd)
		if isFull() {
			executeAndReset()
			processTopQueue()
		}
	}

	// flush items in batch.
	executeAndReset()

	// empty the queue
	flushQueue()

	// reduce buckets into total
	// total =  bucket[0] + 2*bucket[1] + 3*bucket[2] ... + n*bucket[n-1]
	// both bucket sets contribute to bucket k: bucketsJE only holds the rare
	// doubling / flushed-conflict fallbacks.
	var runningSum, total g2JacExtended
	runningSum.setInfinity()
	total.setInfinity()
	for k := len(buckets) - 1; k >= 0; k-- {
		runningSum.addMixed(&buckets[k])
		if !bucketsJE[k].IsInfinity() {
			runningSum.add(&bucketsJE[k])
		}
		total.add(&runningSum)
	}

	if sem != nil {
		// release a token to the semaphore
		// before sending to chRes
		sem <- struct{}{}
	}

	chRes <- total

}
   570  
   571  // we declare the buckets as fixed-size array types
   572  // this allow us to allocate the buckets on the stack
   573  type bucketG2AffineC10 [512]G2Affine
   574  type bucketG2AffineC11 [1024]G2Affine
   575  type bucketG2AffineC12 [2048]G2Affine
   576  type bucketG2AffineC13 [4096]G2Affine
   577  type bucketG2AffineC14 [8192]G2Affine
   578  type bucketG2AffineC15 [16384]G2Affine
   579  type bucketG2AffineC16 [32768]G2Affine
   580  
   581  // buckets: array of G2Affine points of size 1 << (c-1)
   582  type ibG2Affine interface {
   583  	bucketG2AffineC10 |
   584  		bucketG2AffineC11 |
   585  		bucketG2AffineC12 |
   586  		bucketG2AffineC13 |
   587  		bucketG2AffineC14 |
   588  		bucketG2AffineC15 |
   589  		bucketG2AffineC16
   590  }
   591  
   592  // array of coordinates fptower.E2
   593  type cG2Affine interface {
   594  	cG2AffineC10 |
   595  		cG2AffineC11 |
   596  		cG2AffineC12 |
   597  		cG2AffineC13 |
   598  		cG2AffineC14 |
   599  		cG2AffineC15 |
   600  		cG2AffineC16
   601  }
   602  
   603  // buckets: array of G2Affine points (for the batch addition)
   604  type pG2Affine interface {
   605  	pG2AffineC10 |
   606  		pG2AffineC11 |
   607  		pG2AffineC12 |
   608  		pG2AffineC13 |
   609  		pG2AffineC14 |
   610  		pG2AffineC15 |
   611  		pG2AffineC16
   612  }
   613  
   614  // buckets: array of *G2Affine points (for the batch addition)
   615  type ppG2Affine interface {
   616  	ppG2AffineC10 |
   617  		ppG2AffineC11 |
   618  		ppG2AffineC12 |
   619  		ppG2AffineC13 |
   620  		ppG2AffineC14 |
   621  		ppG2AffineC15 |
   622  		ppG2AffineC16
   623  }
   624  
   625  // buckets: array of G2Affine queue operations (for the batch addition)
   626  type qOpsG2Affine interface {
   627  	qG2AffineC10 |
   628  		qG2AffineC11 |
   629  		qG2AffineC12 |
   630  		qG2AffineC13 |
   631  		qG2AffineC14 |
   632  		qG2AffineC15 |
   633  		qG2AffineC16
   634  }
   635  
   636  // batch size 80 when c = 10
   637  type cG2AffineC10 [80]fptower.E2
   638  type pG2AffineC10 [80]G2Affine
   639  type ppG2AffineC10 [80]*G2Affine
   640  type qG2AffineC10 [80]batchOpG2Affine
   641  
   642  // batch size 150 when c = 11
   643  type cG2AffineC11 [150]fptower.E2
   644  type pG2AffineC11 [150]G2Affine
   645  type ppG2AffineC11 [150]*G2Affine
   646  type qG2AffineC11 [150]batchOpG2Affine
   647  
   648  // batch size 200 when c = 12
   649  type cG2AffineC12 [200]fptower.E2
   650  type pG2AffineC12 [200]G2Affine
   651  type ppG2AffineC12 [200]*G2Affine
   652  type qG2AffineC12 [200]batchOpG2Affine
   653  
   654  // batch size 350 when c = 13
   655  type cG2AffineC13 [350]fptower.E2
   656  type pG2AffineC13 [350]G2Affine
   657  type ppG2AffineC13 [350]*G2Affine
   658  type qG2AffineC13 [350]batchOpG2Affine
   659  
   660  // batch size 400 when c = 14
   661  type cG2AffineC14 [400]fptower.E2
   662  type pG2AffineC14 [400]G2Affine
   663  type ppG2AffineC14 [400]*G2Affine
   664  type qG2AffineC14 [400]batchOpG2Affine
   665  
   666  // batch size 500 when c = 15
   667  type cG2AffineC15 [500]fptower.E2
   668  type pG2AffineC15 [500]G2Affine
   669  type ppG2AffineC15 [500]*G2Affine
   670  type qG2AffineC15 [500]batchOpG2Affine
   671  
   672  // batch size 640 when c = 16
   673  type cG2AffineC16 [640]fptower.E2
   674  type pG2AffineC16 [640]G2Affine
   675  type ppG2AffineC16 [640]*G2Affine
   676  type qG2AffineC16 [640]batchOpG2Affine
   677  
   678  type bitSetC2 [2]bool
   679  type bitSetC3 [4]bool
   680  type bitSetC4 [8]bool
   681  type bitSetC5 [16]bool
   682  type bitSetC6 [32]bool
   683  type bitSetC7 [64]bool
   684  type bitSetC8 [128]bool
   685  type bitSetC9 [256]bool
   686  type bitSetC10 [512]bool
   687  type bitSetC11 [1024]bool
   688  type bitSetC12 [2048]bool
   689  type bitSetC13 [4096]bool
   690  type bitSetC14 [8192]bool
   691  type bitSetC15 [16384]bool
   692  type bitSetC16 [32768]bool
   693  
   694  type bitSet interface {
   695  	bitSetC2 |
   696  		bitSetC3 |
   697  		bitSetC4 |
   698  		bitSetC5 |
   699  		bitSetC6 |
   700  		bitSetC7 |
   701  		bitSetC8 |
   702  		bitSetC9 |
   703  		bitSetC10 |
   704  		bitSetC11 |
   705  		bitSetC12 |
   706  		bitSetC13 |
   707  		bitSetC14 |
   708  		bitSetC15 |
   709  		bitSetC16
   710  }