github.com/consensys/gnark-crypto@v0.14.0/ecc/bw6-633/multiexp_affine.go (about)

// Copyright 2020 Consensys Software Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by consensys/gnark-crypto DO NOT EDIT

package bw6633

import (
	"github.com/consensys/gnark-crypto/ecc/bw6-633/fp"
)

type batchOpG1Affine struct {
	bucketID uint16
	point    G1Affine
}

// processChunkG1BatchAffine processes a chunk of the scalars during the MSM,
// using affine coordinates for the buckets. To amortize the cost of the inverse in the affine
// addition we use a batch affine addition (an illustrative sketch of the underlying
// batch-inversion trick follows this function).
//
// this is derived from a PR by 0x0ece : https://github.com/ConsenSys/gnark-crypto/pull/249
// See Section 5.3: ia.cr/2022/1396
func processChunkG1BatchAffine[BJE ibg1JacExtended, B ibG1Affine, BS bitSet, TP pG1Affine, TPP ppG1Affine, TQ qOpsG1Affine, TC cG1Affine](
	chunk uint64,
	chRes chan<- g1JacExtended,
	c uint64,
	points []G1Affine,
	digits []uint16,
	sem chan struct{}) {

	if sem != nil {
		// if we are limited, wait for a token in the semaphore
		<-sem
	}

	// the batch affine addition needs independent points; in other words, for a window of batchSize
	// we want to hit independent bucketIDs when processing the digits. if there is a conflict (we're trying
	// to add 2 different points to the same bucket), then we push the conflicting point to a queue.
	// each time the batch is full, we execute it, and tentatively move the points (if there is no conflict)
	// from the top of the queue into the next batch.
	// if the queue is full, we "flush" it; that is, we sequentially add the points to the buckets in
	// g1JacExtended coordinates.
	// The reasoning is the following: batchSize is chosen such that, for a uniformly random
	// input, the number of conflicts is low, and an element added to the queue should be immediately
	// processed in the next batch. If that's not the case, then our inputs are not random, and we fall
	// back to the non-batch-affine version.
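	//
	// As an illustrative example (not from the original source), take batchSize = 3 and
	// digits that map to buckets 5, 9, 5: the second occurrence of bucket 5 conflicts with
	// the pending batch and is pushed onto the queue; once the batch is executed (and
	// bucketIds is reset), processTopQueue retries the queued operation and it joins the
	// next batch.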

	// note that we have 2 sets of buckets:
	// 1 in G1Affine used with the batch affine additions
	// 1 in g1JacExtended used for the edge cases (doubling) and when the queue of conflicting points is flushed
	var buckets B // in G1Affine coordinates, the infinity point is represented as (0,0), no need to init
	var bucketsJE BJE
	for i := 0; i < len(buckets); i++ {
		bucketsJE[i].setInfinity()
	}

	// setup for the batch affine addition
	var (
		bucketIds BS  // bitSet to signify presence of a bucket in current batch
		cptAdd    int // counts the number of (bucket, point) pairs added to the current batch
		R         TPP // bucket references
		P         TP  // points to be added to R (buckets); it is beneficial to store them on the stack (ie copy)
		queue     TQ  // queue of points that conflict with the current batch
		qID       int // current position in queue
	)

	batchSize := len(P)

	isFull := func() bool { return cptAdd == batchSize }

	executeAndReset := func() {
		batchAddG1Affine[TP, TPP, TC](&R, &P, cptAdd)
		var tmp BS
		bucketIds = tmp
		cptAdd = 0
	}

	addFromQueue := func(op batchOpG1Affine) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		// note that there is a bit of duplicate logic between add and addFromQueue
		// the reason is that as of Go 1.19.3, if we pass a pointer to the queue item (see add signature)
		// the compiler will put the queue on the heap.
		BK := &buckets[op.bucketID]

		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			BK.Set(&op.point)
			return
		}
		if BK.X.Equal(&op.point.X) {
			if BK.Y.Equal(&op.point.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets
				bucketsJE[op.bucketID].addMixed(&op.point)
				return
			}
			BK.setInfinity()
			return
		}

		bucketIds[op.bucketID] = true
		R[cptAdd] = BK
		P[cptAdd] = op.point
		cptAdd++
	}

	add := func(bucketID uint16, PP *G1Affine, isAdd bool) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		BK := &buckets[bucketID]
		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			if isAdd {
				BK.Set(PP)
			} else {
				BK.Neg(PP)
			}
			return
		}
		if BK.X.Equal(&PP.X) {
			if BK.Y.Equal(&PP.Y) {
				// P + P: doubling, which should be quite rare --
				if isAdd {
					bucketsJE[bucketID].addMixed(PP)
				} else {
					BK.setInfinity()
				}
				return
			}
			if isAdd {
				BK.setInfinity()
			} else {
				bucketsJE[bucketID].subMixed(PP)
			}
			return
		}

		bucketIds[bucketID] = true
		R[cptAdd] = BK
		if isAdd {
			P[cptAdd].Set(PP)
		} else {
			P[cptAdd].Neg(PP)
		}
		cptAdd++
	}

	flushQueue := func() {
		for i := 0; i < qID; i++ {
			bucketsJE[queue[i].bucketID].addMixed(&queue[i].point)
		}
		qID = 0
	}

	processTopQueue := func() {
		for i := qID - 1; i >= 0; i-- {
			if bucketIds[queue[i].bucketID] {
				return
			}
			addFromQueue(queue[i])
			// len(queue) < batchSize so no need to check for full batch.
			qID--
		}
	}

	for i, digit := range digits {

		if digit == 0 || points[i].IsInfinity() {
			continue
		}

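		// the digit encodes a signed multiple of points[i]: an even digit 2k means
		// "add to bucket k-1" (i.e. +k*P, since bucket b accumulates the multiple b+1),
		// an odd digit 2k+1 means "subtract from bucket k" (i.e. -(k+1)*P).
		// For example, digit 2 adds P to bucket 0 and digit 1 subtracts P from bucket 0.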
		bucketID := uint16((digit >> 1))
		isAdd := digit&1 == 0
		if isAdd {
			// add
			bucketID -= 1
		}

		if bucketIds[bucketID] {
			// put it in queue
			queue[qID].bucketID = bucketID
			if isAdd {
				queue[qID].point.Set(&points[i])
			} else {
				queue[qID].point.Neg(&points[i])
			}
			qID++

			// queue is full, flush it.
			if qID == len(queue)-1 {
				flushQueue()
			}
			continue
		}

		// we add the point to the batch.
		add(bucketID, &points[i], isAdd)
		if isFull() {
			executeAndReset()
			processTopQueue()
		}
	}

	// flush items in batch.
	executeAndReset()

	// empty the queue
	flushQueue()

	// reduce buckets into total
	// total = bucket[0] + 2*bucket[1] + 3*bucket[2] ... + n*bucket[n-1]
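	// (running-sum trick: scanning the buckets from the top, runningSum accumulates
	// bucket[n-1] + ... + bucket[k], and adding it to total once per iteration yields
	// total = sum_k (k+1)*bucket[k]; e.g. for n = 3,
	// total = 3*bucket[2] + 2*bucket[1] + 1*bucket[0].)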
	var runningSum, total g1JacExtended
	runningSum.setInfinity()
	total.setInfinity()
	for k := len(buckets) - 1; k >= 0; k-- {
		runningSum.addMixed(&buckets[k])
		if !bucketsJE[k].IsInfinity() {
			runningSum.add(&bucketsJE[k])
		}
		total.add(&runningSum)
	}

	if sem != nil {
		// release a token to the semaphore
		// before sending to chRes
		sem <- struct{}{}
	}

	chRes <- total

}
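
// The batch executed by executeAndReset above relies on batch affine addition: every
// chord slope needs a field inversion, and Montgomery's batch-inversion trick replaces
// the cpt inversions by a single one plus a few multiplications per element.
// batchAddG1AffineSketch below is an illustrative, hypothetical sketch of that idea and
// is not part of the generated API; the generated batchAddG1Affine, defined elsewhere in
// this package, follows the same idea with the scratch array provided by the TC type
// parameter instead of slices. The sketch assumes the preconditions the chunk processor
// enforces: no point at infinity and R[i] != ±P[i].
func batchAddG1AffineSketch(R []*G1Affine, P []G1Affine, cpt int) {
	if cpt == 0 {
		return
	}
	prefix := make([]fp.Element, cpt) // prefix[i] = d[0]*...*d[i-1]
	d := make([]fp.Element, cpt)      // denominators x2 - x1

	// accumulate the product of all denominators
	var acc fp.Element
	acc.SetOne()
	for i := 0; i < cpt; i++ {
		d[i].Sub(&P[i].X, &R[i].X)
		prefix[i].Set(&acc)
		acc.Mul(&acc, &d[i])
	}

	// a single inversion for the whole batch
	var accInv fp.Element
	accInv.Inverse(&acc)

	// back-substitute to recover each 1/d[i] and finish the affine additions
	for i := cpt - 1; i >= 0; i-- {
		var dInv, lambda fp.Element
		dInv.Mul(&prefix[i], &accInv) // 1/d[i]
		accInv.Mul(&accInv, &d[i])    // now 1/(d[0]*...*d[i-1])

		lambda.Sub(&P[i].Y, &R[i].Y)
		lambda.Mul(&lambda, &dInv) // slope of the chord through R[i] and P[i]

		// x3 = lambda^2 - x1 - x2 ; y3 = lambda*(x1 - x3) - y1
		var x3, y3 fp.Element
		x3.Square(&lambda)
		x3.Sub(&x3, &R[i].X)
		x3.Sub(&x3, &P[i].X)
		y3.Sub(&R[i].X, &x3)
		y3.Mul(&y3, &lambda)
		y3.Sub(&y3, &R[i].Y)

		R[i].X.Set(&x3)
		R[i].Y.Set(&y3)
	}
}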

// we declare the buckets as fixed-size array types
// this allows us to allocate the buckets on the stack
type bucketG1AffineC12 [2048]G1Affine
type bucketG1AffineC16 [32768]G1Affine
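// (2048 = 1 << (12-1) and 32768 = 1 << (16-1): one bucket per possible non-zero digit magnitude.)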

// buckets: array of G1Affine points of size 1 << (c-1)
type ibG1Affine interface {
	bucketG1AffineC12 |
		bucketG1AffineC16
}

// array of coordinates fp.Element (scratch space for the batch affine addition)
type cG1Affine interface {
	cG1AffineC12 |
		cG1AffineC16
}

// batch: array of G1Affine points to be added to the buckets (for the batch addition)
type pG1Affine interface {
	pG1AffineC12 |
		pG1AffineC16
}

// batch: array of *G1Affine bucket references (for the batch addition)
type ppG1Affine interface {
	ppG1AffineC12 |
		ppG1AffineC16
}

// queue: array of batchOpG1Affine operations (conflicting points waiting for the next batch)
type qOpsG1Affine interface {
	qG1AffineC12 |
		qG1AffineC16
}

// batch size 200 when c = 12
type cG1AffineC12 [200]fp.Element
type pG1AffineC12 [200]G1Affine
type ppG1AffineC12 [200]*G1Affine
type qG1AffineC12 [200]batchOpG1Affine

// batch size 640 when c = 16
type cG1AffineC16 [640]fp.Element
type pG1AffineC16 [640]G1Affine
type ppG1AffineC16 [640]*G1Affine
type qG1AffineC16 [640]batchOpG1Affine
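
// For illustration only: the generated multiexp caller instantiates the chunk processor
// with one matching set of these types per window size; for c = 16 that would look
// roughly like the call below (the extended-Jacobian bucket type is defined in another
// generated file, so its exact identifier here is an assumption):
//
//	processChunkG1BatchAffine[bucketg1JacExtendedC16, bucketG1AffineC16, bitSetC16,
//		pG1AffineC16, ppG1AffineC16, qG1AffineC16, cG1AffineC16](
//		chunk, chRes, 16, points, digits, sem)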

type batchOpG2Affine struct {
	bucketID uint16
	point    G2Affine
}

// processChunkG2BatchAffine processes a chunk of the scalars during the MSM,
// using affine coordinates for the buckets. To amortize the cost of the inverse in the affine
// addition we use a batch affine addition.
//
// this is derived from a PR by 0x0ece : https://github.com/ConsenSys/gnark-crypto/pull/249
// See Section 5.3: ia.cr/2022/1396
func processChunkG2BatchAffine[BJE ibg2JacExtended, B ibG2Affine, BS bitSet, TP pG2Affine, TPP ppG2Affine, TQ qOpsG2Affine, TC cG2Affine](
	chunk uint64,
	chRes chan<- g2JacExtended,
	c uint64,
	points []G2Affine,
	digits []uint16,
	sem chan struct{}) {

	if sem != nil {
		// if we are limited, wait for a token in the semaphore
		// (see the illustrative helper after this function)
		<-sem
	}

	// the batch affine addition needs independent points; in other words, for a window of batchSize
	// we want to hit independent bucketIDs when processing the digits. if there is a conflict (we're trying
	// to add 2 different points to the same bucket), then we push the conflicting point to a queue.
	// each time the batch is full, we execute it, and tentatively move the points (if there is no conflict)
	// from the top of the queue into the next batch.
	// if the queue is full, we "flush" it; that is, we sequentially add the points to the buckets in
	// g2JacExtended coordinates.
	// The reasoning is the following: batchSize is chosen such that, for a uniformly random
	// input, the number of conflicts is low, and an element added to the queue should be immediately
	// processed in the next batch. If that's not the case, then our inputs are not random, and we fall
	// back to the non-batch-affine version.

	// note that we have 2 sets of buckets:
	// 1 in G2Affine used with the batch affine additions
	// 1 in g2JacExtended used for the edge cases (doubling) and when the queue of conflicting points is flushed
	var buckets B // in G2Affine coordinates, the infinity point is represented as (0,0), no need to init
	var bucketsJE BJE
	for i := 0; i < len(buckets); i++ {
		bucketsJE[i].setInfinity()
	}

	// setup for the batch affine addition
	var (
		bucketIds BS  // bitSet to signify presence of a bucket in current batch
		cptAdd    int // counts the number of (bucket, point) pairs added to the current batch
		R         TPP // bucket references
		P         TP  // points to be added to R (buckets); it is beneficial to store them on the stack (ie copy)
		queue     TQ  // queue of points that conflict with the current batch
		qID       int // current position in queue
	)

	batchSize := len(P)

	isFull := func() bool { return cptAdd == batchSize }

	executeAndReset := func() {
		batchAddG2Affine[TP, TPP, TC](&R, &P, cptAdd)
		var tmp BS
		bucketIds = tmp
		cptAdd = 0
	}

	addFromQueue := func(op batchOpG2Affine) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		// note that there is a bit of duplicate logic between add and addFromQueue
		// the reason is that as of Go 1.19.3, if we pass a pointer to the queue item (see add signature)
		// the compiler will put the queue on the heap.
		BK := &buckets[op.bucketID]

		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			BK.Set(&op.point)
			return
		}
		if BK.X.Equal(&op.point.X) {
			if BK.Y.Equal(&op.point.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets
				bucketsJE[op.bucketID].addMixed(&op.point)
				return
			}
			BK.setInfinity()
			return
		}

		bucketIds[op.bucketID] = true
		R[cptAdd] = BK
		P[cptAdd] = op.point
		cptAdd++
	}

	add := func(bucketID uint16, PP *G2Affine, isAdd bool) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		BK := &buckets[bucketID]
		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			if isAdd {
				BK.Set(PP)
			} else {
				BK.Neg(PP)
			}
			return
		}
		if BK.X.Equal(&PP.X) {
			if BK.Y.Equal(&PP.Y) {
				// P + P: doubling, which should be quite rare --
				if isAdd {
					bucketsJE[bucketID].addMixed(PP)
				} else {
					BK.setInfinity()
				}
				return
			}
			if isAdd {
				BK.setInfinity()
			} else {
				bucketsJE[bucketID].subMixed(PP)
			}
			return
		}

		bucketIds[bucketID] = true
		R[cptAdd] = BK
		if isAdd {
			P[cptAdd].Set(PP)
		} else {
			P[cptAdd].Neg(PP)
		}
		cptAdd++
	}

	flushQueue := func() {
		for i := 0; i < qID; i++ {
			bucketsJE[queue[i].bucketID].addMixed(&queue[i].point)
		}
		qID = 0
	}

	processTopQueue := func() {
		for i := qID - 1; i >= 0; i-- {
			if bucketIds[queue[i].bucketID] {
				return
			}
			addFromQueue(queue[i])
			// len(queue) < batchSize so no need to check for full batch.
			qID--
		}
	}

	for i, digit := range digits {

		if digit == 0 || points[i].IsInfinity() {
			continue
		}

		bucketID := uint16((digit >> 1))
		isAdd := digit&1 == 0
		if isAdd {
			// add
			bucketID -= 1
		}

		if bucketIds[bucketID] {
			// put it in queue
			queue[qID].bucketID = bucketID
			if isAdd {
				queue[qID].point.Set(&points[i])
			} else {
				queue[qID].point.Neg(&points[i])
			}
			qID++

			// queue is full, flush it.
			if qID == len(queue)-1 {
				flushQueue()
			}
			continue
		}

		// we add the point to the batch.
		add(bucketID, &points[i], isAdd)
		if isFull() {
			executeAndReset()
			processTopQueue()
		}
	}

	// flush items in batch.
	executeAndReset()

	// empty the queue
	flushQueue()

	// reduce buckets into total
	// total = bucket[0] + 2*bucket[1] + 3*bucket[2] ... + n*bucket[n-1]
	var runningSum, total g2JacExtended
	runningSum.setInfinity()
	total.setInfinity()
	for k := len(buckets) - 1; k >= 0; k-- {
		runningSum.addMixed(&buckets[k])
		if !bucketsJE[k].IsInfinity() {
			runningSum.add(&bucketsJE[k])
		}
		total.add(&runningSum)
	}

	if sem != nil {
		// release a token to the semaphore
		// before sending to chRes
		sem <- struct{}{}
	}

	chRes <- total

}
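
// newLimitingSemaphore is an illustrative, hypothetical helper (not part of the
// generated API) showing how a caller could build the sem channel accepted by the
// chunk processors above: the channel is pre-filled with tokens, each processor
// consumes one token on entry (<-sem) and puts it back (sem <- struct{}{}) right
// before sending its result on chRes.
func newLimitingSemaphore(maxConcurrency int) chan struct{} {
	if maxConcurrency <= 0 {
		return nil // a nil semaphore means "no concurrency limit" for the processors above
	}
	sem := make(chan struct{}, maxConcurrency)
	for i := 0; i < maxConcurrency; i++ {
		sem <- struct{}{} // pre-fill with tokens
	}
	return sem
}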

// we declare the buckets as fixed-size array types
// this allows us to allocate the buckets on the stack
type bucketG2AffineC12 [2048]G2Affine
type bucketG2AffineC16 [32768]G2Affine

// buckets: array of G2Affine points of size 1 << (c-1)
type ibG2Affine interface {
	bucketG2AffineC12 |
		bucketG2AffineC16
}

// array of coordinates fp.Element (scratch space for the batch affine addition)
type cG2Affine interface {
	cG2AffineC12 |
		cG2AffineC16
}

// batch: array of G2Affine points to be added to the buckets (for the batch addition)
type pG2Affine interface {
	pG2AffineC12 |
		pG2AffineC16
}

// batch: array of *G2Affine bucket references (for the batch addition)
type ppG2Affine interface {
	ppG2AffineC12 |
		ppG2AffineC16
}

// queue: array of batchOpG2Affine operations (conflicting points waiting for the next batch)
type qOpsG2Affine interface {
	qG2AffineC12 |
		qG2AffineC16
}

// batch size 200 when c = 12
type cG2AffineC12 [200]fp.Element
type pG2AffineC12 [200]G2Affine
type ppG2AffineC12 [200]*G2Affine
type qG2AffineC12 [200]batchOpG2Affine

// batch size 640 when c = 16
type cG2AffineC16 [640]fp.Element
type pG2AffineC16 [640]G2Affine
type ppG2AffineC16 [640]*G2Affine
type qG2AffineC16 [640]batchOpG2Affine
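// bitSetCx marks which buckets are already "used" in the current batch; as for the
// bucket arrays above, its size is 1 << (c-1) for window size c
// (e.g. 8 = 1 << (4-1), 2048 = 1 << (12-1), 32768 = 1 << (16-1)).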
type bitSetC4 [8]bool
type bitSetC5 [16]bool
type bitSetC6 [32]bool
type bitSetC8 [128]bool
type bitSetC12 [2048]bool
type bitSetC16 [32768]bool

type bitSet interface {
	bitSetC4 |
		bitSetC5 |
		bitSetC6 |
		bitSetC8 |
		bitSetC12 |
		bitSetC16
}