github.com/consensys/gnark-crypto@v0.14.0/ecc/bw6-633/fp/vector.go

// Copyright 2020 ConsenSys Software Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by consensys/gnark-crypto DO NOT EDIT

package fp

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"
)

// Vector represents a slice of Element.
//
// It implements the following interfaces:
//   - Stringer
//   - io.WriterTo
//   - io.ReaderFrom
//   - encoding.BinaryMarshaler
//   - encoding.BinaryUnmarshaler
//   - sort.Interface
type Vector []Element

// MarshalBinary implements encoding.BinaryMarshaler
func (vector *Vector) MarshalBinary() (data []byte, err error) {
	var buf bytes.Buffer

	if _, err = vector.WriteTo(&buf); err != nil {
		return
	}
	return buf.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (vector *Vector) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	_, err := vector.ReadFrom(r)
	return err
}

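// exampleRoundTrip is an illustrative sketch and not part of the generated file:
// it shows a Vector surviving a MarshalBinary / UnmarshalBinary round trip.
// The function name is hypothetical.
func exampleRoundTrip(v Vector) (Vector, error) {
	data, err := v.MarshalBinary()
	if err != nil {
		return nil, err
	}
	var decoded Vector
	if err := decoded.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	return decoded, nil
}
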
// WriteTo implements io.WriterTo and writes a vector of big-endian encoded Element.
// The length of the vector is encoded as a uint32 in the first 4 bytes.
func (vector *Vector) WriteTo(w io.Writer) (int64, error) {
	// encode slice length
	if err := binary.Write(w, binary.BigEndian, uint32(len(*vector))); err != nil {
		return 0, err
	}

	n := int64(4)

	var buf [Bytes]byte
	for i := 0; i < len(*vector); i++ {
		BigEndian.PutElement(&buf, (*vector)[i])
		m, err := w.Write(buf[:])
		n += int64(m)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

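// exampleEncodedSize is an illustrative sketch and not part of the generated file:
// it checks that WriteTo produces exactly the 4-byte length prefix followed by
// Bytes bytes per element. The function name is hypothetical.
func exampleEncodedSize(v Vector) error {
	var buf bytes.Buffer
	n, err := v.WriteTo(&buf)
	if err != nil {
		return err
	}
	if want := int64(4 + len(v)*Bytes); n != want || int64(buf.Len()) != want {
		return fmt.Errorf("unexpected encoded size: got %d, want %d", n, want)
	}
	return nil
}
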
// AsyncReadFrom reads a vector of big-endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
// It consumes the needed bytes from the reader and returns the number of bytes read and an error if any.
// It also returns a channel that will be closed when the validation is done.
// The validation consists of checking that the elements are smaller than the modulus, and
// converting them to Montgomery form.
func (vector *Vector) AsyncReadFrom(r io.Reader) (int64, error, chan error) {
	chErr := make(chan error, 1)
	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		close(chErr)
		return int64(read), err, chErr
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)
	if sliceLen == 0 {
		close(chErr)
		return n, nil, chErr
	}

	// view the vector's backing array as a byte slice so the encoded data can be
	// read directly into it without an intermediate copy
	bSlice := unsafe.Slice((*byte)(unsafe.Pointer(&(*vector)[0])), sliceLen*Bytes)
	read, err := io.ReadFull(r, bSlice)
	n += int64(read)
	if err != nil {
		close(chErr)
		return n, err, chErr
	}

	go func() {
		var cptErrors uint64
		// process the elements in parallel
		execute(int(sliceLen), func(start, end int) {

			var z Element
			for i := start; i < end; i++ {
				// we have to set vector[i]
				bstart := i * Bytes
				bend := bstart + Bytes
				b := bSlice[bstart:bend]
				// the encoding is big-endian (most significant byte first), while the
				// limbs z[0]..z[9] are ordered from least to most significant word
				z[0] = binary.BigEndian.Uint64(b[72:80])
				z[1] = binary.BigEndian.Uint64(b[64:72])
				z[2] = binary.BigEndian.Uint64(b[56:64])
				z[3] = binary.BigEndian.Uint64(b[48:56])
				z[4] = binary.BigEndian.Uint64(b[40:48])
				z[5] = binary.BigEndian.Uint64(b[32:40])
				z[6] = binary.BigEndian.Uint64(b[24:32])
				z[7] = binary.BigEndian.Uint64(b[16:24])
				z[8] = binary.BigEndian.Uint64(b[8:16])
				z[9] = binary.BigEndian.Uint64(b[0:8])

				if !z.smallerThanModulus() {
					atomic.AddUint64(&cptErrors, 1)
					return
				}
				z.toMont()
				(*vector)[i] = z
			}
		})

		if cptErrors > 0 {
			chErr <- fmt.Errorf("async read: %d elements failed validation", cptErrors)
		}
		close(chErr)
	}()
	return n, nil, chErr
}

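// exampleAsyncRead is an illustrative sketch and not part of the generated file:
// it decodes a serialized Vector with AsyncReadFrom and waits on the returned
// channel for the background validation (modulus check and conversion to
// Montgomery form) to finish before using the result. The function name is hypothetical.
func exampleAsyncRead(data []byte) (Vector, error) {
	var v Vector
	_, err, chErr := v.AsyncReadFrom(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	// other work could overlap with the validation here
	if err := <-chErr; err != nil {
		return nil, err
	}
	return v, nil
}
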
// ReadFrom implements io.ReaderFrom and reads a vector of big-endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
func (vector *Vector) ReadFrom(r io.Reader) (int64, error) {

	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		return int64(read), err
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)

	for i := 0; i < int(sliceLen); i++ {
		read, err := io.ReadFull(r, buf[:])
		n += int64(read)
		if err != nil {
			return n, err
		}
		(*vector)[i], err = BigEndian.Element(&buf)
		if err != nil {
			return n, err
		}
	}

	return n, nil
}

// String implements fmt.Stringer interface
func (vector Vector) String() string {
	var sbb strings.Builder
	sbb.WriteByte('[')
	for i := 0; i < len(vector); i++ {
		sbb.WriteString(vector[i].String())
		if i != len(vector)-1 {
			sbb.WriteByte(',')
		}
	}
	sbb.WriteByte(']')
	return sbb.String()
}

// Len is the number of elements in the collection.
func (vector Vector) Len() int {
	return len(vector)
}

// Less reports whether the element with
// index i should sort before the element with index j.
func (vector Vector) Less(i, j int) bool {
	return vector[i].Cmp(&vector[j]) == -1
}

// Swap swaps the elements with indexes i and j.
func (vector Vector) Swap(i, j int) {
	vector[i], vector[j] = vector[j], vector[i]
}

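// Illustrative usage note, not part of the generated file: because Vector
// implements sort.Interface through Len, Less and Swap above, a vector can be
// sorted in ascending order with the standard library, e.g. (requires
// importing "sort"):
//
//	sort.Sort(v)
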
// TODO @gbotrel make a public package out of that.
// execute runs the work function in parallel, splitting the half-open range
// [0, nbIterations) across tasks.
// This is a copy-paste of internal/parallel/parallel.go,
// as we don't want generated code to import internal/.
func execute(nbIterations int, work func(int, int), maxCpus ...int) {

	nbTasks := runtime.NumCPU()
	if len(maxCpus) == 1 {
		nbTasks = maxCpus[0]
		if nbTasks < 1 {
			nbTasks = 1
		} else if nbTasks > 512 {
			nbTasks = 512
		}
	}

	if nbTasks == 1 {
		// no goroutines
		work(0, nbIterations)
		return
	}

	nbIterationsPerCpus := nbIterations / nbTasks

	// more CPUs than tasks: a CPU will work on exactly one iteration
	if nbIterationsPerCpus < 1 {
		nbIterationsPerCpus = 1
		nbTasks = nbIterations
	}

	var wg sync.WaitGroup

	extraTasks := nbIterations - (nbTasks * nbIterationsPerCpus)
	extraTasksOffset := 0

	for i := 0; i < nbTasks; i++ {
		wg.Add(1)
		_start := i*nbIterationsPerCpus + extraTasksOffset
		_end := _start + nbIterationsPerCpus
		if extraTasks > 0 {
			_end++
			extraTasks--
			extraTasksOffset++
		}
		go func() {
			work(_start, _end)
			wg.Done()
		}()
	}

	wg.Wait()
}
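
// exampleParallelFill is an illustrative sketch and not part of the generated file:
// it uses execute to fill a slice in parallel. Each task owns the half-open
// range [start, end) it receives, so the writes need no extra synchronization.
// The function name is hypothetical.
func exampleParallelFill(n int) []uint64 {
	out := make([]uint64, n)
	execute(n, func(start, end int) {
		for i := start; i < end; i++ {
			out[i] = 2 * uint64(i)
		}
	})
	return out
}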