github.com/consensys/gnark-crypto@v0.14.0/ecc/bw6-761/fr/vector.go

// Copyright 2020 ConsenSys Software Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by consensys/gnark-crypto DO NOT EDIT

package fr

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"
)

// Vector represents a slice of Element.
//
// It implements the following interfaces:
//   - Stringer
//   - io.WriterTo
//   - io.ReaderFrom
//   - encoding.BinaryMarshaler
//   - encoding.BinaryUnmarshaler
//   - sort.Interface
type Vector []Element

// MarshalBinary implements encoding.BinaryMarshaler
func (vector *Vector) MarshalBinary() (data []byte, err error) {
	var buf bytes.Buffer

	if _, err = vector.WriteTo(&buf); err != nil {
		return
	}
	return buf.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (vector *Vector) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	_, err := vector.ReadFrom(r)
	return err
}
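
// exampleVectorBinaryRoundTrip is an illustrative sketch, not part of the
// generated code: it round-trips a Vector through MarshalBinary and
// UnmarshalBinary. It assumes Element.SetUint64 and Element.Equal, which are
// defined elsewhere in this package.
func exampleVectorBinaryRoundTrip() error {
	v := make(Vector, 4)
	for i := range v {
		v[i].SetUint64(uint64(i + 1))
	}

	data, err := v.MarshalBinary()
	if err != nil {
		return err
	}

	var decoded Vector
	if err := decoded.UnmarshalBinary(data); err != nil {
		return err
	}

	for i := range v {
		if !decoded[i].Equal(&v[i]) {
			return fmt.Errorf("round trip mismatch at index %d", i)
		}
	}
	return nil
}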

// WriteTo implements io.WriterTo and writes a vector of big endian encoded Element.
// The length of the vector is encoded as a uint32 in the first 4 bytes.
func (vector *Vector) WriteTo(w io.Writer) (int64, error) {
	// encode slice length
	if err := binary.Write(w, binary.BigEndian, uint32(len(*vector))); err != nil {
		return 0, err
	}

	n := int64(4)

	var buf [Bytes]byte
	for i := 0; i < len(*vector); i++ {
		BigEndian.PutElement(&buf, (*vector)[i])
		m, err := w.Write(buf[:])
		n += int64(m)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}
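
// exampleVectorWriteTo is an illustrative sketch, not part of the generated
// code: it writes a Vector to a bytes.Buffer and checks the wire format
// documented above, a 4-byte big endian length prefix followed by Bytes bytes
// per element.
func exampleVectorWriteTo(v Vector) error {
	var buf bytes.Buffer
	n, err := v.WriteTo(&buf)
	if err != nil {
		return err
	}

	// total size is the length prefix plus one fixed-size block per element
	expected := int64(4 + len(v)*Bytes)
	if n != expected || int64(buf.Len()) != expected {
		return fmt.Errorf("unexpected encoded size: got %d, want %d", n, expected)
	}

	// the first 4 bytes hold the number of elements
	if binary.BigEndian.Uint32(buf.Bytes()[:4]) != uint32(len(v)) {
		return fmt.Errorf("unexpected length prefix")
	}
	return nil
}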

// AsyncReadFrom reads a vector of big endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
// It consumes the needed bytes from the reader and returns the number of bytes read and an error if any.
// It also returns a channel that will be closed when the validation is done.
// The validation consists of checking that the elements are smaller than the modulus, and
// converting them to Montgomery form.
func (vector *Vector) AsyncReadFrom(r io.Reader) (int64, error, chan error) {
	chErr := make(chan error, 1)
	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		close(chErr)
		return int64(read), err, chErr
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)
	if sliceLen == 0 {
		close(chErr)
		return n, nil, chErr
	}

	bSlice := unsafe.Slice((*byte)(unsafe.Pointer(&(*vector)[0])), sliceLen*Bytes)
	read, err := io.ReadFull(r, bSlice)
	n += int64(read)
	if err != nil {
		close(chErr)
		return n, err, chErr
	}

	go func() {
		var cptErrors uint64
		// process the elements in parallel
		execute(int(sliceLen), func(start, end int) {

			var z Element
			for i := start; i < end; i++ {
				// we have to set vector[i]
				bstart := i * Bytes
				bend := bstart + Bytes
				b := bSlice[bstart:bend]
				z[0] = binary.BigEndian.Uint64(b[40:48])
				z[1] = binary.BigEndian.Uint64(b[32:40])
				z[2] = binary.BigEndian.Uint64(b[24:32])
				z[3] = binary.BigEndian.Uint64(b[16:24])
				z[4] = binary.BigEndian.Uint64(b[8:16])
				z[5] = binary.BigEndian.Uint64(b[0:8])

				if !z.smallerThanModulus() {
					atomic.AddUint64(&cptErrors, 1)
					return
				}
				z.toMont()
				(*vector)[i] = z
			}
		})

		if cptErrors > 0 {
			chErr <- fmt.Errorf("async read: %d elements failed validation", cptErrors)
		}
		close(chErr)
	}()
	return n, nil, chErr
}
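
// exampleVectorAsyncReadFrom is an illustrative sketch, not part of the
// generated code: it decodes a Vector with AsyncReadFrom and waits for the
// background validation to finish before using the result.
func exampleVectorAsyncReadFrom(encoded []byte) (Vector, error) {
	var v Vector
	_, err, chErr := v.AsyncReadFrom(bytes.NewReader(encoded))
	if err != nil {
		return nil, err
	}

	// other work can overlap with the validation here

	// the channel is closed once every element has been checked against the
	// modulus and converted to Montgomery form; a buffered error, if any, is
	// received before the close is observed
	if err := <-chErr; err != nil {
		return nil, err
	}
	return v, nil
}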

// ReadFrom implements io.ReaderFrom and reads a vector of big endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
func (vector *Vector) ReadFrom(r io.Reader) (int64, error) {

	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		return int64(read), err
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)

	for i := 0; i < int(sliceLen); i++ {
		read, err := io.ReadFull(r, buf[:])
		n += int64(read)
		if err != nil {
			return n, err
		}
		(*vector)[i], err = BigEndian.Element(&buf)
		if err != nil {
			return n, err
		}
	}

	return n, nil
}
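
// exampleVectorReadFrom is an illustrative sketch, not part of the generated
// code: it streams a Vector through an in-memory buffer using the
// WriteTo / ReadFrom pair, the reader-based counterpart of the
// MarshalBinary / UnmarshalBinary round trip shown earlier.
func exampleVectorReadFrom(v Vector) (Vector, error) {
	var buf bytes.Buffer
	if _, err := v.WriteTo(&buf); err != nil {
		return nil, err
	}

	var decoded Vector
	if _, err := decoded.ReadFrom(&buf); err != nil {
		return nil, err
	}
	return decoded, nil
}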

// String implements the fmt.Stringer interface
func (vector Vector) String() string {
	var sbb strings.Builder
	sbb.WriteByte('[')
	for i := 0; i < len(vector); i++ {
		sbb.WriteString(vector[i].String())
		if i != len(vector)-1 {
			sbb.WriteByte(',')
		}
	}
	sbb.WriteByte(']')
	return sbb.String()
}

// Len is the number of elements in the collection.
func (vector Vector) Len() int {
	return len(vector)
}

// Less reports whether the element with
// index i should sort before the element with index j.
func (vector Vector) Less(i, j int) bool {
	return vector[i].Cmp(&vector[j]) == -1
}

// Swap swaps the elements with indexes i and j.
func (vector Vector) Swap(i, j int) {
	vector[i], vector[j] = vector[j], vector[i]
}
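
// exampleSortVector is an illustrative sketch, not part of the generated code:
// Len, Less and Swap make Vector satisfy sort.Interface, so in practice one
// would simply call sort.Sort(v). To avoid touching the import block of this
// file, the sketch sorts in place with a small insertion sort built on the
// same three methods.
func exampleSortVector(v Vector) {
	for i := 1; i < v.Len(); i++ {
		// sift v[i] left until it is no longer smaller than its predecessor
		for j := i; j > 0 && v.Less(j, j-1); j-- {
			v.Swap(j, j-1)
		}
	}
}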

// TODO @gbotrel make a public package out of that.
// execute executes the work function in parallel.
// this is copied from internal/parallel/parallel.go,
// as we don't want generated code to import internal/
func execute(nbIterations int, work func(int, int), maxCpus ...int) {

	nbTasks := runtime.NumCPU()
	if len(maxCpus) == 1 {
		nbTasks = maxCpus[0]
		if nbTasks < 1 {
			nbTasks = 1
		} else if nbTasks > 512 {
			nbTasks = 512
		}
	}

	if nbTasks == 1 {
		// no goroutines
		work(0, nbIterations)
		return
	}

	nbIterationsPerCpus := nbIterations / nbTasks

	// more CPUs than iterations: each CPU works on exactly one iteration
	if nbIterationsPerCpus < 1 {
		nbIterationsPerCpus = 1
		nbTasks = nbIterations
	}

	var wg sync.WaitGroup

	extraTasks := nbIterations - (nbTasks * nbIterationsPerCpus)
	extraTasksOffset := 0

	for i := 0; i < nbTasks; i++ {
		wg.Add(1)
		_start := i*nbIterationsPerCpus + extraTasksOffset
		_end := _start + nbIterationsPerCpus
		if extraTasks > 0 {
			_end++
			extraTasks--
			extraTasksOffset++
		}
		go func() {
			work(_start, _end)
			wg.Done()
		}()
	}

	wg.Wait()
}
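
// exampleExecute is an illustrative sketch, not part of the generated code: it
// uses the execute helper above to double every element of a vector in
// parallel, each goroutine owning a disjoint chunk of indices. It assumes
// Element.Double, which is defined elsewhere in this package.
func exampleExecute(v Vector) {
	execute(len(v), func(start, end int) {
		for i := start; i < end; i++ {
			// v[i] = 2 * v[i]; chunks are disjoint, so no synchronization is needed
			v[i].Double(&v[i])
		}
	})
}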