github.com/consensys/gnark-crypto@v0.14.0/ecc/bw6-756/fp/vector.go

// Copyright 2020 ConsenSys Software Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by consensys/gnark-crypto DO NOT EDIT

package fp

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"
)

// Vector represents a slice of Element.
//
// It implements the following interfaces:
//   - Stringer
//   - io.WriterTo
//   - io.ReaderFrom
//   - encoding.BinaryMarshaler
//   - encoding.BinaryUnmarshaler
//   - sort.Interface
type Vector []Element
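
// Example (editorial sketch, not part of the generated code): a Vector is an
// ordinary slice of Element, so it can be allocated with make and indexed
// directly; fmt relies on the Stringer implementation below when printing it.
// Element.SetUint64 comes from element.go in the same package.
//
//	v := make(Vector, 2)
//	v[0].SetUint64(1)
//	v[1].SetUint64(42)
//	fmt.Println(v) // [1,42]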

// MarshalBinary implements encoding.BinaryMarshaler
func (vector *Vector) MarshalBinary() (data []byte, err error) {
	var buf bytes.Buffer

	if _, err = vector.WriteTo(&buf); err != nil {
		return
	}
	return buf.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (vector *Vector) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	_, err := vector.ReadFrom(r)
	return err
}
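
// Example (editorial sketch): round-tripping a Vector through the
// BinaryMarshaler / BinaryUnmarshaler pair defined above.
//
//	data, err := v.MarshalBinary()
//	// handle err ...
//	var w Vector
//	err = w.UnmarshalBinary(data)
//	// handle err; w is now an element-wise copy of v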

// WriteTo implements io.WriterTo and writes a vector of big endian encoded Element.
// The length of the vector is encoded as a uint32 in the first 4 bytes.
func (vector *Vector) WriteTo(w io.Writer) (int64, error) {
	// encode slice length
	if err := binary.Write(w, binary.BigEndian, uint32(len(*vector))); err != nil {
		return 0, err
	}

	n := int64(4)

	var buf [Bytes]byte
	for i := 0; i < len(*vector); i++ {
		BigEndian.PutElement(&buf, (*vector)[i])
		m, err := w.Write(buf[:])
		n += int64(m)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}
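
// Note (editorial): the encoding produced by WriteTo is a 4-byte big-endian
// length prefix followed by one Bytes-sized big-endian block per element, i.e.
// 4 + len(vector)*Bytes bytes in total (Bytes is 96 for this field: 12 limbs
// of 64 bits each).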

// AsyncReadFrom reads a vector of big endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
// It consumes the needed bytes from the reader and returns the number of bytes read and an error, if any.
// It also returns a channel that will be closed when the validation is done.
// The validation consists of checking that the elements are smaller than the modulus, and
// converting them to Montgomery form.
func (vector *Vector) AsyncReadFrom(r io.Reader) (int64, error, chan error) {
	chErr := make(chan error, 1)
	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		close(chErr)
		return int64(read), err, chErr
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)
	if sliceLen == 0 {
		close(chErr)
		return n, nil, chErr
	}

	// view the freshly allocated elements as a raw byte slice, so the whole
	// payload can be read into place with a single io.ReadFull call
	bSlice := unsafe.Slice((*byte)(unsafe.Pointer(&(*vector)[0])), sliceLen*Bytes)
	read, err := io.ReadFull(r, bSlice)
	n += int64(read)
	if err != nil {
		close(chErr)
		return n, err, chErr
	}

	go func() {
		var cptErrors uint64
		// process the elements in parallel
		execute(int(sliceLen), func(start, end int) {

			var z Element
			for i := start; i < end; i++ {
				// we have to set vector[i]
				bstart := i * Bytes
				bend := bstart + Bytes
				b := bSlice[bstart:bend]
				// convert the big endian bytes to little endian uint64 limbs
				z[0] = binary.BigEndian.Uint64(b[88:96])
				z[1] = binary.BigEndian.Uint64(b[80:88])
				z[2] = binary.BigEndian.Uint64(b[72:80])
				z[3] = binary.BigEndian.Uint64(b[64:72])
				z[4] = binary.BigEndian.Uint64(b[56:64])
				z[5] = binary.BigEndian.Uint64(b[48:56])
				z[6] = binary.BigEndian.Uint64(b[40:48])
				z[7] = binary.BigEndian.Uint64(b[32:40])
				z[8] = binary.BigEndian.Uint64(b[24:32])
				z[9] = binary.BigEndian.Uint64(b[16:24])
				z[10] = binary.BigEndian.Uint64(b[8:16])
				z[11] = binary.BigEndian.Uint64(b[0:8])

				if !z.smallerThanModulus() {
					atomic.AddUint64(&cptErrors, 1)
					return
				}
				z.toMont()
				(*vector)[i] = z
			}
		})

		if cptErrors > 0 {
			chErr <- fmt.Errorf("async read: %d elements failed validation", cptErrors)
		}
		close(chErr)
	}()
	return n, nil, chErr
}
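
// Example (editorial sketch, r is an assumed io.Reader): AsyncReadFrom returns
// as soon as the raw bytes are read; validation and Montgomery conversion run
// in the background, and the caller blocks on the returned channel when it
// needs the result.
//
//	var v Vector
//	n, err, done := v.AsyncReadFrom(r)
//	// handle err (I/O error; the channel is already closed in that case)
//	// ... do other work while the elements are validated ...
//	if err := <-done; err != nil {
//		// some elements were >= modulus
//	}
//	_ = n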

// ReadFrom implements io.ReaderFrom and reads a vector of big endian encoded Element.
// The length of the vector must be encoded as a uint32 in the first 4 bytes.
func (vector *Vector) ReadFrom(r io.Reader) (int64, error) {

	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		return int64(read), err
	}
	sliceLen := binary.BigEndian.Uint32(buf[:4])

	n := int64(4)
	(*vector) = make(Vector, sliceLen)

	for i := 0; i < int(sliceLen); i++ {
		read, err := io.ReadFull(r, buf[:])
		n += int64(read)
		if err != nil {
			return n, err
		}
		(*vector)[i], err = BigEndian.Element(&buf)
		if err != nil {
			return n, err
		}
	}

	return n, nil
}

// String implements the fmt.Stringer interface
func (vector Vector) String() string {
	var sbb strings.Builder
	sbb.WriteByte('[')
	for i := 0; i < len(vector); i++ {
		sbb.WriteString(vector[i].String())
		if i != len(vector)-1 {
			sbb.WriteByte(',')
		}
	}
	sbb.WriteByte(']')
	return sbb.String()
}

// Len is the number of elements in the collection.
func (vector Vector) Len() int {
	return len(vector)
}

// Less reports whether the element with
// index i should sort before the element with index j.
func (vector Vector) Less(i, j int) bool {
	return vector[i].Cmp(&vector[j]) == -1
}

// Swap swaps the elements with indexes i and j.
func (vector Vector) Swap(i, j int) {
	vector[i], vector[j] = vector[j], vector[i]
}
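
// Example (editorial note): Len, Less and Swap above make Vector satisfy
// sort.Interface, so a vector can be sorted in ascending order with the
// standard library (the sort package is not imported by this file):
//
//	sort.Sort(v)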

// TODO @gbotrel make a public package out of that.
// execute runs the work function in parallel over the range [0, nbIterations).
// It is a copy of internal/parallel/parallel.go, duplicated here because
// generated code must not import internal/ packages.
func execute(nbIterations int, work func(int, int), maxCpus ...int) {

	nbTasks := runtime.NumCPU()
	if len(maxCpus) == 1 {
		nbTasks = maxCpus[0]
		if nbTasks < 1 {
			nbTasks = 1
		} else if nbTasks > 512 {
			nbTasks = 512
		}
	}

	if nbTasks == 1 {
		// no goroutines
		work(0, nbIterations)
		return
	}

	nbIterationsPerCpus := nbIterations / nbTasks

	// more CPUs than iterations: each task will work on exactly one iteration
	if nbIterationsPerCpus < 1 {
		nbIterationsPerCpus = 1
		nbTasks = nbIterations
	}

	var wg sync.WaitGroup

	extraTasks := nbIterations - (nbTasks * nbIterationsPerCpus)
	extraTasksOffset := 0

	for i := 0; i < nbTasks; i++ {
		wg.Add(1)
		_start := i*nbIterationsPerCpus + extraTasksOffset
		_end := _start + nbIterationsPerCpus
		if extraTasks > 0 {
			_end++
			extraTasks--
			extraTasksOffset++
		}
		go func() {
			work(_start, _end)
			wg.Done()
		}()
	}

	wg.Wait()
}
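
// Example (editorial note): execute splits the iteration range as evenly as
// possible and gives the first goroutines one extra iteration each to absorb
// the remainder. For instance, execute(10, work) on a machine reporting 4 CPUs
// runs, each in its own goroutine:
//
//	work(0, 3), work(3, 6), work(6, 8), work(8, 10)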