{{ $G1TAffine := print (toUpper .G1.PointName) "Affine" }}
{{ $G1TJacobian := print (toUpper .G1.PointName) "Jac" }}
{{ $G1TJacobianExtended := print (toLower .G1.PointName) "JacExtended" }}

{{ $G2TAffine := print (toUpper .G2.PointName) "Affine" }}
{{ $G2TJacobian := print (toUpper .G2.PointName) "Jac" }}
{{ $G2TJacobianExtended := print (toLower .G2.PointName) "JacExtended" }}

{{/* NOTE(review): the package clause and file header are emitted by the generator,
     not by this template — presumably prepended elsewhere; verify in the generator. */}}

import (
	"io"
	"reflect"
	"errors"
	"encoding/binary"
	"sync/atomic"

	"github.com/consensys/gnark-crypto/ecc/{{.Name}}/internal/fptower"
	"github.com/consensys/gnark-crypto/ecc/{{.Name}}/fp"
	"github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr"
	"github.com/consensys/gnark-crypto/internal/parallel"
)


// To encode G1Affine and G2Affine points, we mask the most significant bits with these bits to specify without ambiguity
// metadata needed for point (de)compression
{{- if ge .FpUnusedBits 3}}
// we follow the BLS12-381 style encoding as specified in ZCash and now IETF
// see https://datatracker.ietf.org/doc/draft-irtf-cfrg-pairing-friendly-curves/11/
// Appendix C.  ZCash serialization format for BLS12_381
const (
	mMask                 byte = 0b111 << 5
	mUncompressed         byte = 0b000 << 5
	_                     byte = 0b001 << 5 // invalid
	mUncompressedInfinity byte = 0b010 << 5
	_                     byte = 0b011 << 5 // invalid
	mCompressedSmallest   byte = 0b100 << 5
	mCompressedLargest    byte = 0b101 << 5
	mCompressedInfinity   byte = 0b110 << 5
	_                     byte = 0b111 << 5 // invalid
)
{{- else}}
// we have less than 3 bits available on the msw, so we can't follow BLS12-381 style encoding.
// the difference is the case where a point is infinity and uncompressed is not flagged
const (
	mMask               byte = 0b11 << 6
	mUncompressed       byte = 0b00 << 6
	mCompressedSmallest byte = 0b10 << 6
	mCompressedLargest  byte = 0b11 << 6
	mCompressedInfinity byte = 0b01 << 6
)
{{- end}}

// SizeOfGT represents the size in bytes that a GT element need in binary form
const SizeOfGT = fptower.SizeOfGT

var (
	// ErrInvalidInfinityEncoding is returned when an infinity flag is set but the
	// remaining payload bytes are not all zero.
	ErrInvalidInfinityEncoding = errors.New("invalid infinity point encoding")
	// ErrInvalidEncoding is returned when the metadata mask in the most significant
	// byte is one of the invalid bit patterns.
	ErrInvalidEncoding = errors.New("invalid point encoding")
)

// Encoder writes {{.Name}} object values to an output stream
type Encoder struct {
	w   io.Writer
	n   int64 // written bytes
	raw bool  // raw vs compressed encoding
}

// Decoder reads {{.Name}} object values from an inbound stream
type Decoder struct {
	r             io.Reader
	n             int64 // read bytes
	subGroupCheck bool  // default to true
}

// NewDecoder returns a binary decoder supporting curve {{.Name}} objects in both
// compressed and uncompressed (raw) forms
func NewDecoder(r io.Reader, options ...func(*Decoder)) *Decoder {
	// subgroup checks are on by default; see NoSubgroupChecks to opt out.
	d := &Decoder{r: r, subGroupCheck: true}

	for _, o := range options {
		o(d)
	}

	return d
}
// Decode reads the binary encoding of v from the stream
// type must be *uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, *[]G1Affine or *[]G2Affine
func (dec *Decoder) Decode(v interface{}) (err error) {
	rv := reflect.ValueOf(v)
	if v == nil || rv.Kind() != reflect.Ptr || rv.IsNil() || !rv.Elem().CanSet() {
		return errors.New("{{.Name}} decoder: unsupported type, need pointer")
	}

	// implementation note: code is a bit verbose (abusing code generation), but minimize allocations on the heap
	// in particular, careful attention must be given to usage of Bytes() method on Elements and Points
	// that return an array (not a slice) of bytes. Using this is beneficial to minimize memory allocations
	// in very large (de)serialization upstream in gnark.
	// (but detrimental to code readability here)

	// fast path: types that know how to read themselves bypass the switch entirely.
	var read64 int64
	if vf, ok := v.(io.ReaderFrom); ok {
		read64, err = vf.ReadFrom(dec.r)
		dec.n += read64
		return
	}

	// scratch buffer sized for the largest object we decode here (an uncompressed G2 point).
	var buf [SizeOfG2AffineUncompressed]byte
	var read int
	var sliceLen uint32

	switch t := v.(type) {
	case *[][]uint64:
		// outer length, then for each inner slice: length followed by big-endian uint64s.
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		*t = make([][]uint64, sliceLen)

		for i := range *t {
			if sliceLen, err = dec.readUint32(); err != nil {
				return
			}
			(*t)[i] = make([]uint64, sliceLen)
			for j := range (*t)[i] {
				if (*t)[i][j], err = dec.readUint64(); err != nil {
					return
				}
			}
		}
		return
	case *[]uint64:
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		*t = make([]uint64, sliceLen)
		for i := range *t {
			if (*t)[i], err = dec.readUint64(); err != nil {
				return
			}
		}
		return
	case *fr.Element:
		read, err = io.ReadFull(dec.r, buf[:fr.Bytes])
		dec.n += int64(read)
		if err != nil {
			return
		}
		// canonical form: rejects values >= the field modulus.
		err = t.SetBytesCanonical(buf[:fr.Bytes])
		return
	case *fp.Element:
		read, err = io.ReadFull(dec.r, buf[:fp.Bytes])
		dec.n += int64(read)
		if err != nil {
			return
		}
		err = t.SetBytesCanonical(buf[:fp.Bytes])
		return
	case *[]fr.Element:
		read64, err = (*fr.Vector)(t).ReadFrom(dec.r)
		dec.n += read64
		return
	case *[]fp.Element:
		read64, err = (*fp.Vector)(t).ReadFrom(dec.r)
		dec.n += read64
		return
	case *[][]fr.Element:
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		if len(*t) != int(sliceLen) {
			*t = make([][]fr.Element, sliceLen)
		}
		for i := range *t {
			// NOTE(review): err is not checked inside this loop, so a failed read does not
			// stop subsequent iterations; only the last error is returned — confirm intended.
			read64, err = (*fr.Vector)(&(*t)[i]).ReadFrom(dec.r)
			dec.n += read64
		}
		return
	case *G1Affine:
		// we start by reading compressed point size, if metadata tells us it is uncompressed, we read more.
		read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed])
		dec.n += int64(read)
		if err != nil {
			return
		}
		nbBytes := SizeOfG1AffineCompressed

		{{ if ge .FpUnusedBits 3}}
		// 111, 011, 001 --> invalid mask
		if isMaskInvalid(buf[0]) {
			err = ErrInvalidEncoding
			return
		}
		{{- end}}

		// most significant byte contains metadata
		if !isCompressed(buf[0]) {
			nbBytes = SizeOfG1AffineUncompressed
			// we read more.
			read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
		}
		_, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck)
		return
	case *G2Affine:
		// we start by reading compressed point size, if metadata tells us it is uncompressed, we read more.
		read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed])
		dec.n += int64(read)
		if err != nil {
			return
		}
		nbBytes := SizeOfG2AffineCompressed

		{{ if ge .FpUnusedBits 3}}
		// 111, 011, 001 --> invalid mask
		if isMaskInvalid(buf[0]) {
			err = ErrInvalidEncoding
			return
		}
		{{- end}}

		// most significant byte contains metadata
		if !isCompressed(buf[0]) {
			nbBytes = SizeOfG2AffineUncompressed
			// we read more.
			read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
		}
		_, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck)
		return
	case *[]G1Affine:
		// two-phase decode: sequentially read X coordinates (cheap), then recover the
		// Y coordinates of compressed points in parallel (square roots are expensive).
		sliceLen, err = dec.readUint32()
		if err != nil {
			return
		}
		// NOTE(review): the `|| *t == nil` guard is absent in the []G2Affine case below — confirm intended.
		if len(*t) != int(sliceLen) || *t == nil {
			*t = make([]G1Affine, sliceLen)
		}
		compressed := make([]bool, sliceLen)
		for i := 0; i < len(*t); i++ {

			// we start by reading compressed point size, if metadata tells us it is uncompressed, we read more.
			read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
			nbBytes := SizeOfG1AffineCompressed

			{{ if ge .FpUnusedBits 3}}
			// 111, 011, 001 --> invalid mask
			if isMaskInvalid(buf[0]) {
				err = ErrInvalidEncoding
				return
			}
			{{- end}}

			// most significant byte contains metadata
			if !isCompressed(buf[0]) {
				nbBytes = SizeOfG1AffineUncompressed
				// we read more.
				read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed])
				dec.n += int64(read)
				if err != nil {
					return
				}
				// subgroup check deferred to the parallel phase below (subGroupCheck=false here).
				_, err = (*t)[i].setBytes(buf[:nbBytes], false)
				if err != nil {
					return
				}
			} else {
				var r bool
				if r, err = (*t)[i].unsafeSetCompressedBytes(buf[:nbBytes]); err != nil {
					return
				}
				// r == true means point at infinity: nothing left to compute.
				compressed[i] = !r
			}
		}
		var nbErrs uint64
		parallel.Execute(len(compressed), func(start, end int) {
			for i := start; i < end; i++ {
				if compressed[i] {
					if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil {
						atomic.AddUint64(&nbErrs, 1)
					}
				} else if dec.subGroupCheck {
					if !(*t)[i].IsInSubGroup() {
						atomic.AddUint64(&nbErrs, 1)
					}
				}
			}
		})
		if nbErrs != 0 {
			return errors.New("point decompression failed")
		}

		return nil
	case *[]G2Affine:
		sliceLen, err = dec.readUint32()
		if err != nil {
			return
		}
		if len(*t) != int(sliceLen) {
			*t = make([]G2Affine, sliceLen)
		}
		compressed := make([]bool, sliceLen)
		for i := 0; i < len(*t); i++ {

			// we start by reading compressed point size, if metadata tells us it is uncompressed, we read more.
			read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
			nbBytes := SizeOfG2AffineCompressed


			{{ if ge .FpUnusedBits 3}}
			// 111, 011, 001 --> invalid mask
			if isMaskInvalid(buf[0]) {
				err = ErrInvalidEncoding
				return
			}
			{{- end}}

			// most significant byte contains metadata
			if !isCompressed(buf[0]) {
				nbBytes = SizeOfG2AffineUncompressed
				// we read more.
				read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed])
				dec.n += int64(read)
				if err != nil {
					return
				}
				_, err = (*t)[i].setBytes(buf[:nbBytes], false)
				if err != nil {
					return
				}
			} else {
				var r bool
				if r, err = (*t)[i].unsafeSetCompressedBytes(buf[:nbBytes]); err != nil {
					return
				}
				compressed[i] = !r
			}
		}
		var nbErrs uint64
		parallel.Execute(len(compressed), func(start, end int) {
			for i := start; i < end; i++ {
				if compressed[i] {
					if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil {
						atomic.AddUint64(&nbErrs, 1)
					}
				} else if dec.subGroupCheck {
					if !(*t)[i].IsInSubGroup() {
						atomic.AddUint64(&nbErrs, 1)
					}
				}
			}
		})
		if nbErrs != 0 {
			return errors.New("point decompression failed")
		}

		return nil
	default:
		// fall back to encoding/binary for fixed-size types.
		n := binary.Size(t)
		if n == -1 {
			return errors.New("{{.Name}} encoder: unsupported type")
		}
		err = binary.Read(dec.r, binary.BigEndian, t)
		if err == nil {
			dec.n += int64(n)
		}
		return
	}
}

// BytesRead return total bytes read from reader
func (dec *Decoder) BytesRead() int64 {
	return dec.n
}

// readUint32 reads a big-endian uint32 and accounts for the bytes consumed.
func (dec *Decoder) readUint32() (r uint32, err error) {
	var read int
	var buf [4]byte
	read, err = io.ReadFull(dec.r, buf[:4])
	dec.n += int64(read)
	if err != nil {
		return
	}
	r = binary.BigEndian.Uint32(buf[:4])
	return
}

// readUint64 reads a big-endian uint64 and accounts for the bytes consumed.
func (dec *Decoder) readUint64() (r uint64, err error) {
	var read int
	var buf [8]byte
	read, err = io.ReadFull(dec.r, buf[:])
	dec.n += int64(read)
	if err != nil {
		return
	}
	r = binary.BigEndian.Uint64(buf[:])
	return
}
{{ if ge .FpUnusedBits 3}}
// isMaskInvalid returns true if the mask is invalid
func isMaskInvalid(msb byte) bool {
	mData := msb & mMask
	return ((mData == (0b111 << 5)) || (mData == (0b011 << 5)) || (mData == (0b001 << 5)))
}
{{- end}}

// isCompressed reports whether the metadata in the most significant byte flags
// a compressed encoding (i.e. anything that is not one of the uncompressed masks).
func isCompressed(msb byte) bool {
	mData := msb & mMask
	return !((mData == mUncompressed){{- if ge .FpUnusedBits 3}}||(mData == mUncompressedInfinity) {{- end}})
}


// NewEncoder returns a binary encoder supporting curve {{.Name}} objects
func NewEncoder(w io.Writer, options ...func(*Encoder)) *Encoder {
	// default settings
	enc := &Encoder{
		w:   w,
		n:   0,
		raw: false,
	}

	// handle options
	for _, option := range options {
		option(enc)
	}

	return enc
}


// Encode writes the binary encoding of v to the stream
// type must be uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, []G1Affine or []G2Affine
func (enc *Encoder) Encode(v interface{}) (err error) {
	if enc.raw {
		return enc.encodeRaw(v)
	}
	return enc.encode(v)
}

// BytesWritten return total bytes written on writer
func (enc *Encoder) BytesWritten() int64 {
	return enc.n
}


// RawEncoding returns an option to use in NewEncoder(...) which sets raw encoding mode to true
// points will not be compressed using this option
func RawEncoding() func(*Encoder) {
	return func(enc *Encoder) {
		enc.raw = true
	}
}

// NoSubgroupChecks returns an option to use in NewDecoder(...) which disable subgroup checks on the points
// the decoder will read. Use with caution, as crafted points from an untrusted source can lead to crypto-attacks.
func NoSubgroupChecks() func(*Decoder) {
	return func(dec *Decoder) {
		dec.subGroupCheck = false
	}
}

// isZeroed checks that the provided bytes are at 0
// (firstByte is passed separately because the caller has already masked out metadata bits)
func isZeroed(firstByte byte, buf []byte) bool {
	if firstByte != 0 {
		return false
	}
	for _, b := range buf {
		if b != 0 {
			return false
		}
	}
	return true
}

{{template "encode" dict "Raw" ""}}
{{template "encode" dict "Raw" "Raw"}}

// writeUint64Slice writes a length-prefixed slice of big-endian uint64s.
func (enc *Encoder) writeUint64Slice(t []uint64) (err error) {
	if err = enc.writeUint32(uint32(len(t))); err != nil {
		return
	}
	for i := range t {
		if err = enc.writeUint64(t[i]); err != nil {
			return
		}
	}
	return nil
}

// writeUint64SliceSlice writes a length-prefixed slice of length-prefixed uint64 slices.
func (enc *Encoder) writeUint64SliceSlice(t [][]uint64) (err error) {
	if err = enc.writeUint32(uint32(len(t))); err != nil {
		return
	}
	for i := range t {
		if err = enc.writeUint32(uint32(len(t[i]))); err != nil {
			return
		}
		for j := range t[i] {
			if err = enc.writeUint64(t[i][j]); err != nil {
				return
			}
		}
	}
	return nil
}

func (enc *Encoder) writeUint64(a uint64) error {
	var buff [64 / 8]byte
	binary.BigEndian.PutUint64(buff[:], a)
	written, err := enc.w.Write(buff[:])
	enc.n += int64(written)
	return err
}

func (enc *Encoder) writeUint32(a uint32) error {
	var buff [32 / 8]byte
	binary.BigEndian.PutUint32(buff[:], a)
	written, err := enc.w.Write(buff[:])
	enc.n += int64(written)
	return err
}
{{/* "encode" is instantiated twice: with Raw="" (compressed points, via Bytes())
     and Raw="Raw" (uncompressed points, via RawBytes()). */}}
{{ define "encode"}}

func (enc *Encoder) encode{{- $.Raw}}(v interface{}) (err error) {
	rv := reflect.ValueOf(v)
	if v == nil || (rv.Kind() == reflect.Ptr && rv.IsNil()) {
		return errors.New("{{.Name}} encoder: can't encode <nil>")
	}

	// implementation note: code is a bit verbose (abusing code generation), but minimize allocations on the heap

	// fast path: types that know how to write themselves bypass the switch entirely.
	var written64 int64
	if vw, ok := v.(io.WriterTo); ok {
		written64, err = vw.WriteTo(enc.w)
		enc.n += written64
		return
	}

	var written int

	switch t := v.(type) {
	case []uint64:
		return enc.writeUint64Slice(t)
	case [][]uint64:
		return enc.writeUint64SliceSlice(t)
	case *fr.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *fp.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G1Affine:
		buf := t.{{- $.Raw}}Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G2Affine:
		buf := t.{{- $.Raw}}Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case fr.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case fp.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case []fr.Element:
		written64, err = (*fr.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case []fp.Element:
		written64, err = (*fp.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case [][]fr.Element:
		// write slice length
		if err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))); err != nil {
			return
		}
		enc.n += 4
		// NOTE(review): err is not checked inside this loop; only the last error is returned.
		for i := range t {
			written64, err = (*fr.Vector)(&t[i]).WriteTo(enc.w)
			enc.n += written64
		}
		return
	case []G1Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG1Affine{{- if $.Raw}}Uncompressed{{- else}}Compressed{{- end}}]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].{{- $.Raw}}Bytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	case []G2Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG2Affine{{- if $.Raw}}Uncompressed{{- else}}Compressed{{- end}}]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].{{- $.Raw}}Bytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	default:
		// fall back to encoding/binary for fixed-size types.
		n := binary.Size(t)
		if n == -1 {
			return errors.New("{{.Name}} encoder: unsupported type")
		}
		err = binary.Write(enc.w, binary.BigEndian, t)
		enc.n += int64(n)
		return
	}
}
{{end}}


{{- $sizeOfFp := mul .Fp.NbWords 8}}

{{/* instantiate the point (de)serialization methods once for G1 and once for G2 */}}
{{template "marshalpoint" dict "all" . "sizeOfFp" $sizeOfFp "CoordType" .G1.CoordType "PointName" .G1.PointName "TAffine" $G1TAffine "TJacobian" $G1TJacobian "TJacobianExtended" $G1TJacobianExtended "FrNbWords" .Fr.NbWords "CRange" .G1.CRange}}
{{template "marshalpoint" dict "all" . "sizeOfFp" $sizeOfFp "CoordType" .G2.CoordType "PointName" .G2.PointName "TAffine" $G2TAffine "TJacobian" $G2TJacobian "TJacobianExtended" $G2TJacobianExtended "FrNbWords" .Fr.NbWords "CRange" .G2.CRange}}
{{define "marshalpoint"}}

// SizeOf{{ $.TAffine }}Compressed represents the size in bytes that a {{ $.TAffine }} need in binary form, compressed
const SizeOf{{ $.TAffine }}Compressed = {{ $.sizeOfFp }} {{- if eq $.CoordType "fptower.E2"}} * 2 {{- end}} {{- if eq $.CoordType "fptower.E4"}} * 4 {{- end}}

// SizeOf{{ $.TAffine }}Uncompressed represents the size in bytes that a {{ $.TAffine }} need in binary form, uncompressed
const SizeOf{{ $.TAffine }}Uncompressed = SizeOf{{ $.TAffine }}Compressed * 2


// Marshal converts p to a byte slice (without point compression)
func (p *{{ $.TAffine }}) Marshal() ([]byte) {
	b := p.RawBytes()
	return b[:]
}

// Unmarshal is an alias to SetBytes()
func (p *{{ $.TAffine }}) Unmarshal(buf []byte) error {
	_, err := p.SetBytes(buf)
	return err
}


// Bytes returns binary representation of p
// will store X coordinate in regular form and a parity bit
{{- if ge .all.FpUnusedBits 3}}
// we follow the BLS12-381 style encoding as specified in ZCash and now IETF
//
// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
//
// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero.
//
// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
{{- else}}
// as we have less than 3 bits available in our coordinate, we can't follow BLS12-381 style encoding (ZCash/IETF)
//
// we use the 2 most significant bits instead
//
// 00 -> uncompressed
// 10 -> compressed, use smallest lexicographically square root of Y^2
// 11 -> compressed, use largest lexicographically square root of Y^2
// 01 -> compressed infinity point
// the "uncompressed infinity point" will just have 00 (uncompressed) followed by zeroes (infinity = 0,0 in affine coordinates)
{{- end}}
func (p *{{ $.TAffine }}) Bytes() (res [SizeOf{{ $.TAffine }}Compressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {
		res[0] = mCompressedInfinity
		return
	}

	msbMask := mCompressedSmallest
	// compressed, we need to know if Y is lexicographically bigger than -Y
	// if p.Y ">" -p.Y
	if p.Y.LexicographicallyLargest() {
		msbMask = mCompressedLargest
	}

	// we store X and mask the most significant word with our metadata mask
	{{- if eq $.CoordType "fptower.E2"}}
	// p.X.A1 | p.X.A0
	{{- $offset := $.sizeOfFp}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.A0"}}
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X.A1"}}
	{{- else if eq $.CoordType "fptower.E4"}}
	// p.X.B1.A1 | p.X.B1.A0 | p.X.B0.A1 | p.X.B0.A0
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X.B1.A1"}}
	{{- $offset := mul $.sizeOfFp 1}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B1.A0"}}
	{{- $offset := mul $.sizeOfFp 2}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B0.A1"}}
	{{- $offset := mul $.sizeOfFp 3}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B0.A0"}}
	{{- else}}
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X"}}
	{{- end}}

	// metadata bits live in the unused top bits of the most significant byte.
	res[0] |= msbMask

	return
}
// RawBytes returns binary representation of p (stores X and Y coordinate)
// see Bytes() for a compressed representation
func (p *{{ $.TAffine }}) RawBytes() (res [SizeOf{{ $.TAffine }}Uncompressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {
		{{if ge .all.FpUnusedBits 3}}
		res[0] = mUncompressedInfinity
		{{else}}
		res[0] = mUncompressed
		{{end}}
		return
	}

	// not compressed
	// we store the Y coordinate
	{{- if eq $.CoordType "fptower.E2"}}
	// p.Y.A1 | p.Y.A0
	{{- $offset := mul $.sizeOfFp 3}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.A0"}}

	{{- $offset := mul $.sizeOfFp 2}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.A1"}}
	{{- else if eq $.CoordType "fptower.E4"}}
	// p.Y.B1.A1 | p.Y.B1.A0 | p.Y.B0.A1 | p.Y.B0.A0
	{{- $offset := mul $.sizeOfFp 4}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.B1.A1"}}
	{{- $offset := mul $.sizeOfFp 5}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.B1.A0"}}
	{{- $offset := mul $.sizeOfFp 6}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.B0.A1"}}
	{{- $offset := mul $.sizeOfFp 7}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.Y.B0.A0"}}
	{{- else}}
	{{- template "putFp" dict "all" .all "OffSet" $.sizeOfFp "From" "p.Y"}}
	{{- end}}

	// we store X and mask the most significant word with our metadata mask
	{{- if eq $.CoordType "fptower.E2"}}
	// p.X.A1 | p.X.A0
	{{- $offset := $.sizeOfFp}}
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X.A1"}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.A0"}}
	{{- else if eq $.CoordType "fptower.E4"}}
	// p.X.B1.A1 | p.X.B1.A0 | p.X.B0.A1 | p.X.B0.A0
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X.B1.A1"}}
	{{- $offset := mul $.sizeOfFp 1}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B1.A0"}}
	{{- $offset := mul $.sizeOfFp 2}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B0.A1"}}
	{{- $offset := mul $.sizeOfFp 3}}
	{{- template "putFp" dict "all" .all "OffSet" $offset "From" "p.X.B0.A0"}}
	{{- else}}
	{{- template "putFp" dict "all" .all "OffSet" 0 "From" "p.X"}}
	{{- end}}

	res[0] |= mUncompressed

	return
}


// SetBytes sets p from binary representation in buf and returns number of consumed bytes
//
// bytes in buf must match either RawBytes() or Bytes() output
//
// if buf is too short io.ErrShortBuffer is returned
//
// if buf contains compressed representation (output from Bytes()) and we're unable to compute
// the Y coordinate (i.e the square root doesn't exist) this function returns an error
//
// this check if the resulting point is on the curve and in the correct subgroup
func (p *{{ $.TAffine }}) SetBytes(buf []byte) (int, error) {
	return p.setBytes(buf, true)
}


// setBytes is the SetBytes implementation with an optional subgroup check,
// so the slice decoder can defer that check to its parallel phase.
func (p *{{ $.TAffine }}) setBytes(buf []byte, subGroupCheck bool) (int, error) {
	if len(buf) < SizeOf{{ $.TAffine }}Compressed {
		return 0, io.ErrShortBuffer
	}

	// most significant byte
	mData := buf[0] & mMask

	{{if ge .all.FpUnusedBits 3}}
	// 111, 011, 001 --> invalid mask
	if isMaskInvalid(mData) {
		return 0, ErrInvalidEncoding
	}
	{{- end}}


	// check buffer size
	if (mData == mUncompressed) {{- if ge .all.FpUnusedBits 3}} || (mData == mUncompressedInfinity) {{- end}} {
		if len(buf) < SizeOf{{ $.TAffine }}Uncompressed {
			return 0, io.ErrShortBuffer
		}
	}

	// infinity encoded, we still check that the buffer is full of zeroes.
	if (mData == mCompressedInfinity) {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOf{{ $.TAffine }}Compressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOf{{ $.TAffine }}Compressed, nil
	}

	{{- if ge .all.FpUnusedBits 3}}
	if (mData == mUncompressedInfinity) {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOf{{ $.TAffine }}Uncompressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOf{{ $.TAffine }}Uncompressed, nil
	}
	{{- end}}

	// uncompressed point
	if mData == mUncompressed {
		// read X and Y coordinates
		{{- if eq $.CoordType "fptower.E2"}}
		// p.X.A1 | p.X.A0
		if err := p.X.A1.SetBytesCanonical(buf[:fp.Bytes]); err != nil {
			return 0, err
		}
		if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
			return 0, err
		}
		// p.Y.A1 | p.Y.A0
		if err := p.Y.A1.SetBytesCanonical(buf[fp.Bytes*2 : fp.Bytes*3]); err != nil {
			return 0, err
		}
		if err := p.Y.A0.SetBytesCanonical(buf[fp.Bytes*3 : fp.Bytes*4]); err != nil {
			return 0, err
		}
		{{- else if eq $.CoordType "fptower.E4"}}
		// p.X.B1.A1 | p.X.B1.A0 | p.X.B0.A1 | p.X.B0.A0
		if err := p.X.B1.A1.SetBytesCanonical(buf[fp.Bytes*0 : fp.Bytes*1]); err != nil {
			return 0, err
		}
		if err := p.X.B1.A0.SetBytesCanonical(buf[fp.Bytes*1 : fp.Bytes*2]); err != nil {
			return 0, err
		}
		if err := p.X.B0.A1.SetBytesCanonical(buf[fp.Bytes*2 : fp.Bytes*3]); err != nil {
			return 0, err
		}
		if err := p.X.B0.A0.SetBytesCanonical(buf[fp.Bytes*3 : fp.Bytes*4]); err != nil {
			return 0, err
		}
		// p.Y.B1.A1 | p.Y.B1.A0 | p.Y.B0.A1 | p.Y.B0.A0
		if err := p.Y.B1.A1.SetBytesCanonical(buf[fp.Bytes*4 : fp.Bytes*5]); err != nil {
			return 0, err
		}
		if err := p.Y.B1.A0.SetBytesCanonical(buf[fp.Bytes*5 : fp.Bytes*6]); err != nil {
			return 0, err
		}
		if err := p.Y.B0.A1.SetBytesCanonical(buf[fp.Bytes*6 : fp.Bytes*7]); err != nil {
			return 0, err
		}
		if err := p.Y.B0.A0.SetBytesCanonical(buf[fp.Bytes*7 : fp.Bytes*8]); err != nil {
			return 0, err
		}
		{{- else}}
		if err := p.X.SetBytesCanonical(buf[:fp.Bytes]); err != nil {
			return 0, err
		}
		if err := p.Y.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
			return 0, err
		}
		{{- end}}

		// subgroup check
		if subGroupCheck && !p.IsInSubGroup() {
			return 0, errors.New("invalid point: subgroup check failed")
		}

		return SizeOf{{ $.TAffine }}Uncompressed, nil
	}

	// we have a compressed coordinate
	// we need to
	// 	1. copy the buffer (to keep this method thread safe)
	// 	2. we need to solve the curve equation to compute Y

	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	// clear the metadata bits before interpreting the first limb as a field element
	bufX[0] &= ^mMask

	// read X coordinate
	{{- if eq $.CoordType "fptower.E2"}}
	// p.X.A1 | p.X.A0
	if err := p.X.A1.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return 0, err
	}
	if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
		return 0, err
	}
	{{- else if eq $.CoordType "fptower.E4"}}
	// p.X.B1.A1 | p.X.B1.A0 | p.X.B0.A1 | p.X.B0.A0
	if err := p.X.B1.A1.SetBytesCanonical(bufX[fp.Bytes*0 : fp.Bytes*1]); err != nil {
		return 0, err
	}
	if err := p.X.B1.A0.SetBytesCanonical(buf[fp.Bytes*1 : fp.Bytes*2]); err != nil {
		return 0, err
	}
	if err := p.X.B0.A1.SetBytesCanonical(buf[fp.Bytes*2 : fp.Bytes*3]); err != nil {
		return 0, err
	}
	if err := p.X.B0.A0.SetBytesCanonical(buf[fp.Bytes*3 : fp.Bytes*4]); err != nil {
		return 0, err
	}
	{{- else}}
	if err := p.X.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return 0, err
	}
	{{- end}}


	// solve the curve equation: Y^2 = X^3 + b
	var YSquared, Y {{$.CoordType}}

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &{{- if eq .PointName "g2"}}bTwistCurveCoeff{{- else}}bCurveCoeff{{- end}})

	{{- if or (eq $.CoordType "fptower.E2") (eq $.CoordType "fptower.E4")}}
	if YSquared.Legendre() == -1 {
		return 0, errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	Y.Sqrt(&YSquared)
	{{- else}}
	if Y.Sqrt(&YSquared) == nil {
		return 0, errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	{{- end}}


	// pick the square root matching the lexicographic flag in the metadata
	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return 0, errors.New("invalid point: subgroup check failed")
	}

	return SizeOf{{ $.TAffine }}Compressed, nil
}



// unsafeComputeY called by Decoder when processing slices of compressed point in parallel (step 2)
// it computes the Y coordinate from the already set X coordinate and is compute intensive
func (p *{{ $.TAffine }}) unsafeComputeY(subGroupCheck bool) error {
	// stored in unsafeSetCompressedBytes
	{{ if eq $.CoordType "fptower.E2"}}
	mData := byte(p.Y.A0[0])
	{{ else if eq $.CoordType "fptower.E4"}}
	mData := byte(p.Y.B0.A0[0])
	{{ else}}
	mData := byte(p.Y[0])
	{{ end}}


	// we have a compressed coordinate, we need to solve the curve equation to compute Y
	var YSquared, Y {{$.CoordType}}

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &{{- if eq .PointName "g2"}}bTwistCurveCoeff{{- else}}bCurveCoeff{{- end}})

	{{- if or (eq $.CoordType "fptower.E2") (eq $.CoordType "fptower.E4")}}
	if YSquared.Legendre() == -1 {
		return errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	Y.Sqrt(&YSquared)
	{{- else}}
	if Y.Sqrt(&YSquared) == nil {
		return errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	{{- end}}


	// pick the square root matching the lexicographic flag stored in the scratch space
	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return errors.New("invalid point: subgroup check failed")
	}

	return nil
}
// unsafeSetCompressedBytes is called by Decoder when processing slices of compressed point in parallel (step 1)
// assumes buf[:8] mask is set to compressed
// returns true if point is infinity and need no further processing
// it sets X coordinate and uses Y for scratch space to store decompression metadata
func (p *{{ $.TAffine }}) unsafeSetCompressedBytes(buf []byte) (isInfinity bool, err error) {

	// read the most significant byte
	mData := buf[0] & mMask

	if (mData == mCompressedInfinity) {
		isInfinity = true
		// infinity encoded, we still check that the rest of the buffer is zeroed
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOf{{ $.TAffine }}Compressed]) {
			return isInfinity, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return isInfinity, nil
	}

	// we need to copy the input buffer (to keep this method thread safe)
	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	// clear the metadata bits before interpreting the first limb as a field element
	bufX[0] &= ^mMask

	// read X coordinate
	{{- if eq $.CoordType "fptower.E2"}}
	// p.X.A1 | p.X.A0
	if err := p.X.A1.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return false, err
	}
	if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
		return false, err
	}

	// store mData in p.Y.A0[0]
	p.Y.A0[0] = uint64(mData)
	{{- else if eq $.CoordType "fptower.E4"}}
	// p.X.B1.A1 | p.X.B1.A0 | p.X.B0.A1 | p.X.B0.A0
	if err := p.X.B1.A1.SetBytesCanonical(bufX[fp.Bytes*0 : fp.Bytes*1]); err != nil {
		return false, err
	}
	if err := p.X.B1.A0.SetBytesCanonical(buf[fp.Bytes*1 : fp.Bytes*2]); err != nil {
		return false, err
	}
	if err := p.X.B0.A1.SetBytesCanonical(buf[fp.Bytes*2 : fp.Bytes*3]); err != nil {
		return false, err
	}
	if err := p.X.B0.A0.SetBytesCanonical(buf[fp.Bytes*3 : fp.Bytes*4]); err != nil {
		return false, err
	}

	// store mData in p.Y.B0.A0[0]
	p.Y.B0.A0[0] = uint64(mData)
	{{- else}}
	if err := p.X.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return false, err
	}
	// store mData in p.Y[0]
	p.Y[0] = uint64(mData)
	{{- end}}

	// recomputing Y will be done asynchronously
	return isInfinity, nil
}



{{end}}




{{- define "putFp"}}
	fp.BigEndian.PutElement((*[fp.Bytes]byte)( res[{{$.OffSet}}:{{$.OffSet}} + fp.Bytes]), {{$.From}})
{{- end}}