github.com/consensys/gnark-crypto@v0.14.0/internal/generator/ecc/template/multiexp_affine.go.tmpl

{{ $G1TAffine := print (toUpper .G1.PointName) "Affine" }}
{{ $G1TJacobian := print (toUpper .G1.PointName) "Jac" }}
{{ $G1TJacobianExtended := print (toLower .G1.PointName) "JacExtended" }}

{{ $G2TAffine := print (toUpper .G2.PointName) "Affine" }}
{{ $G2TJacobian := print (toUpper .G2.PointName) "Jac" }}
{{ $G2TJacobianExtended := print (toLower .G2.PointName) "JacExtended" }}


import (
	"github.com/consensys/gnark-crypto/ecc/{{.Name}}/fp"
	{{- if and (ne .G1.CoordType .G2.CoordType) (ne .Name "secp256k1") }}
	"github.com/consensys/gnark-crypto/ecc/{{.Name}}/internal/fptower"
	{{- end}}
)

{{ template "multiexp" dict "CoordType" .G1.CoordType "PointName" .G1.PointName "UPointName" (toUpper .G1.PointName) "TAffine" $G1TAffine "TJacobian" $G1TJacobian "TJacobianExtended" $G1TJacobianExtended "FrNbWords" .Fr.NbWords "CRange" .G1.CRange}}
{{- if ne .Name "secp256k1"}}
{{ template "multiexp" dict "CoordType" .G2.CoordType "PointName" .G2.PointName "UPointName" (toUpper .G2.PointName) "TAffine" $G2TAffine "TJacobian" $G2TJacobian "TJacobianExtended" $G2TJacobianExtended "FrNbWords" .Fr.NbWords "CRange" .G2.CRange}}
{{- end}}


{{define "multiexp" }}

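// batchOp{{ $.TAffine }} is a queued bucket operation: the target bucket index and the point
// to accumulate (the point is already negated at enqueue time if the digit is a subtraction).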
type batchOp{{ $.TAffine }} struct {
	bucketID uint16
	point    {{ $.TAffine }}
}

// processChunk{{ $.UPointName }}BatchAffine processes a chunk of the scalars during the msm
// using affine coordinates for the buckets. To amortize the cost of the inverse in the affine addition
// we use a batch affine addition.
//
// this is derived from a PR by 0x0ece : https://github.com/ConsenSys/gnark-crypto/pull/249
// See Section 5.3: ia.cr/2022/1396
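//
// Rationale for the batching: a single affine addition needs one field inversion to compute the
// slope lambda = (y2-y1)/(x2-x1); batching independent additions (see batchAdd{{ $.TAffine }})
// shares a single inversion across the whole batch via a batch inversion, at the cost of a few
// extra multiplications per addition.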
func processChunk{{ $.UPointName }}BatchAffine[BJE ib{{ $.TJacobianExtended }}, B ib{{ $.TAffine }}, BS bitSet, TP p{{ $.TAffine }}, TPP pp{{ $.TAffine }}, TQ qOps{{ $.TAffine }}, TC c{{ $.TAffine }}](
	chunk uint64,
	chRes chan<- {{ $.TJacobianExtended }},
	c uint64,
	points []{{ $.TAffine }},
	digits []uint16,
	sem chan struct{}) {

	if sem != nil {
		// if we are limited, wait for a token in the semaphore
		<-sem
	}

	// the batch affine addition needs independent points; in other words, for a window of batchSize
	// we want to hit independent bucketIDs when processing the digit. If there is a conflict (we're trying
	// to add 2 different points to the same bucket), we push the conflicting point to a queue.
	// Each time the batch is full, we execute it, and tentatively put the points (if there is no conflict)
	// from the top of the queue into the next batch.
	// If the queue is full, we "flush" it: we sequentially add the points to the buckets in
	// {{ $.TJacobianExtended }} coordinates.
	// The reasoning behind this is the following: batchSize is chosen such that, for a uniformly random
	// input, the number of conflicts is low, and an element added to the queue should be
	// processed in the next batch. If that's not the case, our inputs are not random, and we fall back to
	// the non-batch-affine version.

	// note that we have 2 sets of buckets:
	// 1 in {{ $.TAffine }}, used with the batch affine additions
	// 1 in {{ $.TJacobianExtended }}, used when the queue of conflicting points is flushed and for the doubling edge cases
	var buckets B // in {{ $.TAffine }} coordinates, the infinity point is represented as (0,0); no need to init
	var bucketsJE BJE
	for i := 0; i < len(buckets); i++ {
		bucketsJE[i].setInfinity()
	}

	// setup for the batch affine;
	var (
		bucketIds BS  // bitSet to signify presence of a bucket in current batch
		cptAdd    int // counts the number of (bucket, point) pairs added to the current batch
		R         TPP // bucket references
		P         TP  // points to be added to R (buckets); it is beneficial to store them on the stack (ie copy)
		queue     TQ  // queue of points that conflict with the current batch
		qID       int // current position in queue
	)

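	// the batch size is encoded in the length of the fixed-size array type TP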
	batchSize := len(P)

	isFull := func() bool { return cptAdd == batchSize }

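	// executeAndReset runs the batch affine addition on the cptAdd (bucket, point) pairs
	// accumulated so far, then clears the bucketIds bitset and the counter for the next batch.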
	executeAndReset := func() {
		batchAdd{{ $.TAffine }}[TP, TPP, TC](&R, &P, cptAdd)
		var tmp BS
		bucketIds = tmp
		cptAdd = 0
	}

	addFromQueue := func(op batchOp{{ $.TAffine }}) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		// note that there is a bit of duplicate logic between add and addFromQueue
		// the reason is that as of Go 1.19.3, if we pass a pointer to the queue item (see add signature)
		// the compiler will put the queue on the heap.
		BK := &buckets[op.bucketID]

		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			BK.Set(&op.point)
			return
		}
		if BK.X.Equal(&op.point.X) {
			if BK.Y.Equal(&op.point.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets
				bucketsJE[op.bucketID].addMixed(&op.point)
				return
			}
			BK.setInfinity()
			return
		}

		bucketIds[op.bucketID] = true
		R[cptAdd] = BK
		P[cptAdd] = op.point
		cptAdd++
	}

	add := func(bucketID uint16, PP *{{$.TAffine}}, isAdd bool) {
		// @precondition: the caller must ensure the bucket is not "used" in the current batch
		BK := &buckets[bucketID]
		// handle special cases with inf or -P / P
		if BK.IsInfinity() {
			if isAdd {
				BK.Set(PP)
			} else {
				BK.Neg(PP)
			}
			return
		}
		if BK.X.Equal(&PP.X) {
			if BK.Y.Equal(&PP.Y) {
				// P + P: doubling, which should be quite rare --
				// we use the other set of buckets (P - P is simply the infinity point)
				if isAdd {
					bucketsJE[bucketID].addMixed(PP)
				} else {
					BK.setInfinity()
				}
				return
			}
			if isAdd {
				BK.setInfinity()
			} else {
				bucketsJE[bucketID].subMixed(PP)
			}
			return
		}

		bucketIds[bucketID] = true
		R[cptAdd] = BK
		if isAdd {
			P[cptAdd].Set(PP)
		} else {
			P[cptAdd].Neg(PP)
		}
		cptAdd++
	}

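	// flushQueue empties the conflict queue, adding each queued point to its bucket in
	// {{ $.TJacobianExtended }} coordinates (queued points already carry the correct sign).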
	flushQueue := func() {
		for i := 0; i < qID; i++ {
			bucketsJE[queue[i].bucketID].addMixed(&queue[i].point)
		}
		qID = 0
	}

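	// processTopQueue moves points from the top of the queue into the freshly reset batch,
	// stopping at the first point whose bucket is already used in the current batch.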
	processTopQueue := func() {
		for i := qID - 1; i >= 0; i-- {
			if bucketIds[queue[i].bucketID] {
				return
			}
			addFromQueue(queue[i])
			// the batch was just reset and qID < len(queue) == batchSize, so no need to check for a full batch.
			qID--
		}
	}

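	// main loop: for each (point, digit) pair, schedule the affine addition in the current
	// batch, or push the (possibly negated) point to the conflict queue if its bucket is
	// already used in this batch.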
	for i, digit := range digits {

		if digit == 0 || points[i].IsInfinity() {
			continue
		}

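		// a digit encodes both the bucket index and the sign of the operation:
		// even digits are additions into bucket (digit>>1)-1, odd digits are
		// subtractions from bucket digit>>1 (a zero digit was skipped above).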
		bucketID := uint16(digit >> 1)
		isAdd := digit&1 == 0
		if isAdd {
			// add
			bucketID -= 1
		}

		if bucketIds[bucketID] {
			// put it in the queue
			queue[qID].bucketID = bucketID
			if isAdd {
				queue[qID].point.Set(&points[i])
			} else {
				queue[qID].point.Neg(&points[i])
			}
			qID++

			// queue is (almost) full, flush it.
			if qID == len(queue)-1 {
				flushQueue()
			}
			continue
		}

		// we add the point to the batch.
		add(bucketID, &points[i], isAdd)
		if isFull() {
			executeAndReset()
			processTopQueue()
		}
	}


	// flush items in batch.
	executeAndReset()

	// empty the queue
	flushQueue()

	// reduce buckets into total
	// total = bucket[0] + 2*bucket[1] + 3*bucket[2] + ... + n*bucket[n-1]
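	// runningSum holds bucket[k] + bucket[k+1] + ... + bucket[n-1]; adding it to total
	// at each iteration weights bucket[k] by (k+1).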
	var runningSum, total {{ $.TJacobianExtended }}
	runningSum.setInfinity()
	total.setInfinity()
	for k := len(buckets) - 1; k >= 0; k-- {
		runningSum.addMixed(&buckets[k])
		if !bucketsJE[k].IsInfinity() {
			runningSum.add(&bucketsJE[k])
		}
		total.add(&runningSum)
	}

	if sem != nil {
		// release a token to the semaphore
		// before sending to chRes
		sem <- struct{}{}
	}

	chRes <- total

}

// we declare the buckets as fixed-size array types
// this allows us to allocate the buckets on the stack
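// note: the "gt $c 9" guards below mean these fixed-size types, and hence the batch
// affine path, are only generated for window sizes c > 9.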
{{- range $c :=  $.CRange}}
{{- if gt $c 9}}
type bucket{{ $.TAffine }}C{{$c}} [{{nbBuckets $c}}]{{ $.TAffine }}
{{- end}}
{{- end}}


// buckets: array of {{ $.TAffine }} points of size 1 << (c-1)
type ib{{ $.TAffine }} interface {
	{{- range $i, $c :=  $.CRange}}
	{{- if gt $c 9}}
	bucket{{ $.TAffine }}C{{$c}} {{- if not (last $i $.CRange)}} | {{- end}}
	{{- end}}
	{{- end}}
}

// array of {{ $.CoordType }} coordinates, used by the batch affine addition
type c{{ $.TAffine }} interface {
	{{- range $i, $c :=  $.CRange}}
	{{- if gt $c 9}}
	c{{ $.TAffine }}C{{$c}} {{- if not (last $i $.CRange)}} | {{- end}}
	{{- end}}
	{{- end}}
}

// array of {{ $.TAffine }} points to be added (for the batch addition)
type p{{ $.TAffine }} interface {
	{{- range $i, $c :=  $.CRange}}
	{{- if gt $c 9}}
	p{{ $.TAffine }}C{{$c}} {{- if not (last $i $.CRange)}} | {{- end}}
	{{- end}}
	{{- end}}
}

// array of *{{ $.TAffine }} bucket references (for the batch addition)
type pp{{ $.TAffine }} interface {
	{{- range $i, $c :=  $.CRange}}
	{{- if gt $c 9}}
	pp{{ $.TAffine }}C{{$c}} {{- if not (last $i $.CRange)}} | {{- end}}
	{{- end}}
	{{- end}}
}

// array of batchOp{{ $.TAffine }} queue operations (for the batch addition)
type qOps{{ $.TAffine }} interface {
	{{- range $i, $c :=  $.CRange}}
	{{- if gt $c 9}}
	q{{ $.TAffine }}C{{$c}} {{- if not (last $i $.CRange)}} | {{- end}}
	{{- end}}
	{{- end}}
}


{{- range $c :=  $.CRange}}
{{if gt $c 9}}
// batch size {{batchSize $c}} when c = {{$c}}
type c{{ $.TAffine }}C{{$c}} [{{batchSize $c}}]{{ $.CoordType }}
type p{{ $.TAffine }}C{{$c}} [{{batchSize $c}}]{{ $.TAffine }}
type pp{{ $.TAffine }}C{{$c}} [{{batchSize $c}}]*{{ $.TAffine }}
type q{{ $.TAffine }}C{{$c}} [{{batchSize $c}}]batchOp{{ $.TAffine }}
{{- end}}
{{- end}}


{{end }}

{{- range $c :=  $.G1.CRange}}
type bitSetC{{$c}} [{{nbBuckets $c}}]bool
{{- end}}

type bitSet interface {
	{{- range $i, $c :=  $.G1.CRange}}
	bitSetC{{$c}} {{- if not (last $i $.G1.CRange)}} | {{- end}}
	{{- end}}
}