github.com/consensys/gnark-crypto@v0.14.0/ecc/bls12-377/marshal.go

// Copyright 2020 Consensys Software Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by consensys/gnark-crypto DO NOT EDIT

package bls12377

import (
	"encoding/binary"
	"errors"
	"io"
	"reflect"
	"sync/atomic"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/fp"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/internal/fptower"
	"github.com/consensys/gnark-crypto/internal/parallel"
)

// To encode G1Affine and G2Affine points, we mask the most significant byte with these bits to specify,
// without ambiguity, the metadata needed for point (de)compression.
// We follow the BLS12-381 style encoding as specified in ZCash and now IETF,
// see https://datatracker.ietf.org/doc/draft-irtf-cfrg-pairing-friendly-curves/11/
// Appendix C. ZCash serialization format for BLS12_381
const (
	mMask                 byte = 0b111 << 5
	mUncompressed         byte = 0b000 << 5
	_                     byte = 0b001 << 5 // invalid
	mUncompressedInfinity byte = 0b010 << 5
	_                     byte = 0b011 << 5 // invalid
	mCompressedSmallest   byte = 0b100 << 5
	mCompressedLargest    byte = 0b101 << 5
	mCompressedInfinity   byte = 0b110 << 5
	_                     byte = 0b111 << 5 // invalid
)

// SizeOfGT represents the size in bytes that a GT element needs in binary form
const SizeOfGT = fptower.SizeOfGT

var (
	ErrInvalidInfinityEncoding = errors.New("invalid infinity point encoding")
	ErrInvalidEncoding         = errors.New("invalid point encoding")
)

// Encoder writes bls12-377 object values to an output stream
type Encoder struct {
	w   io.Writer
	n   int64 // written bytes
	raw bool  // raw vs compressed encoding
}

// Decoder reads bls12-377 object values from an inbound stream
type Decoder struct {
	r             io.Reader
	n             int64 // read bytes
	subGroupCheck bool  // default to true
}

// NewDecoder returns a binary decoder supporting curve bls12-377 objects in both
// compressed and uncompressed (raw) forms
func NewDecoder(r io.Reader, options ...func(*Decoder)) *Decoder {
	d := &Decoder{r: r, subGroupCheck: true}

	for _, o := range options {
		o(d)
	}

	return d
}
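
// NOTE: the helper below is an illustrative sketch, not part of the generated
// gnark-crypto API. It only shows how the three metadata bits defined above
// classify the leading byte of a serialized G1/G2 point; the mask constants are
// unexported, so this kind of inspection is only possible from inside the package.
func pointEncodingForm(firstByte byte) string {
	switch firstByte & mMask {
	case mUncompressed:
		return "uncompressed point"
	case mUncompressedInfinity:
		return "uncompressed point at infinity"
	case mCompressedSmallest:
		return "compressed point, lexicographically smallest Y"
	case mCompressedLargest:
		return "compressed point, lexicographically largest Y"
	case mCompressedInfinity:
		return "compressed point at infinity"
	default:
		return "invalid encoding"
	}
}
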
// Decode reads the binary encoding of v from the stream
// type must be *uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, *[]G1Affine or *[]G2Affine
func (dec *Decoder) Decode(v interface{}) (err error) {
	rv := reflect.ValueOf(v)
	if v == nil || rv.Kind() != reflect.Ptr || rv.IsNil() || !rv.Elem().CanSet() {
		return errors.New("bls12-377 decoder: unsupported type, need pointer")
	}

	// implementation note: code is a bit verbose (abusing code generation), but minimizes allocations on the heap
	// in particular, careful attention must be given to the usage of the Bytes() method on Elements and Points,
	// which returns an array (not a slice) of bytes. Using this is beneficial to minimize memory allocations
	// in very large (de)serialization upstream in gnark.
	// (but detrimental to code readability here)

	var read64 int64
	if vf, ok := v.(io.ReaderFrom); ok {
		read64, err = vf.ReadFrom(dec.r)
		dec.n += read64
		return
	}

	var buf [SizeOfG2AffineUncompressed]byte
	var read int
	var sliceLen uint32

	switch t := v.(type) {
	case *[][]uint64:
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		*t = make([][]uint64, sliceLen)

		for i := range *t {
			if sliceLen, err = dec.readUint32(); err != nil {
				return
			}
			(*t)[i] = make([]uint64, sliceLen)
			for j := range (*t)[i] {
				if (*t)[i][j], err = dec.readUint64(); err != nil {
					return
				}
			}
		}
		return
	case *[]uint64:
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		*t = make([]uint64, sliceLen)
		for i := range *t {
			if (*t)[i], err = dec.readUint64(); err != nil {
				return
			}
		}
		return
	case *fr.Element:
		read, err = io.ReadFull(dec.r, buf[:fr.Bytes])
		dec.n += int64(read)
		if err != nil {
			return
		}
		err = t.SetBytesCanonical(buf[:fr.Bytes])
		return
	case *fp.Element:
		read, err = io.ReadFull(dec.r, buf[:fp.Bytes])
		dec.n += int64(read)
		if err != nil {
			return
		}
		err = t.SetBytesCanonical(buf[:fp.Bytes])
		return
	case *[]fr.Element:
		read64, err = (*fr.Vector)(t).ReadFrom(dec.r)
		dec.n += read64
		return
	case *[]fp.Element:
		read64, err = (*fp.Vector)(t).ReadFrom(dec.r)
		dec.n += read64
		return
	case *[][]fr.Element:
		if sliceLen, err = dec.readUint32(); err != nil {
			return
		}
		if len(*t) != int(sliceLen) {
			*t = make([][]fr.Element, sliceLen)
		}
		for i := range *t {
			read64, err = (*fr.Vector)(&(*t)[i]).ReadFrom(dec.r)
			dec.n += read64
		}
		return
	case *G1Affine:
		// we start by reading the compressed point size; if metadata tells us it is uncompressed, we read more.
		read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed])
		dec.n += int64(read)
		if err != nil {
			return
		}
		nbBytes := SizeOfG1AffineCompressed

		// 111, 011, 001 --> invalid mask
		if isMaskInvalid(buf[0]) {
			err = ErrInvalidEncoding
			return
		}

		// most significant byte contains metadata
		if !isCompressed(buf[0]) {
			nbBytes = SizeOfG1AffineUncompressed
			// we read more.
			read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
		}
		_, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck)
		return
	case *G2Affine:
		// we start by reading the compressed point size; if metadata tells us it is uncompressed, we read more.
		read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed])
		dec.n += int64(read)
		if err != nil {
			return
		}
		nbBytes := SizeOfG2AffineCompressed

		// 111, 011, 001 --> invalid mask
		if isMaskInvalid(buf[0]) {
			err = ErrInvalidEncoding
			return
		}

		// most significant byte contains metadata
		if !isCompressed(buf[0]) {
			nbBytes = SizeOfG2AffineUncompressed
			// we read more.
			read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
		}
		_, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck)
		return
	case *[]G1Affine:
		sliceLen, err = dec.readUint32()
		if err != nil {
			return
		}
		if len(*t) != int(sliceLen) || *t == nil {
			*t = make([]G1Affine, sliceLen)
		}
		compressed := make([]bool, sliceLen)
		for i := 0; i < len(*t); i++ {

			// we start by reading the compressed point size; if metadata tells us it is uncompressed, we read more.
			read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
			nbBytes := SizeOfG1AffineCompressed

			// 111, 011, 001 --> invalid mask
			if isMaskInvalid(buf[0]) {
				err = ErrInvalidEncoding
				return
			}

			// most significant byte contains metadata
			if !isCompressed(buf[0]) {
				nbBytes = SizeOfG1AffineUncompressed
				// we read more.
				read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed])
				dec.n += int64(read)
				if err != nil {
					return
				}
				_, err = (*t)[i].setBytes(buf[:nbBytes], false)
				if err != nil {
					return
				}
			} else {
				var r bool
				if r, err = (*t)[i].unsafeSetCompressedBytes(buf[:nbBytes]); err != nil {
					return
				}
				compressed[i] = !r
			}
		}
		var nbErrs uint64
		parallel.Execute(len(compressed), func(start, end int) {
			for i := start; i < end; i++ {
				if compressed[i] {
					if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil {
						atomic.AddUint64(&nbErrs, 1)
					}
				} else if dec.subGroupCheck {
					if !(*t)[i].IsInSubGroup() {
						atomic.AddUint64(&nbErrs, 1)
					}
				}
			}
		})
		if nbErrs != 0 {
			return errors.New("point decompression failed")
		}

		return nil
	case *[]G2Affine:
		sliceLen, err = dec.readUint32()
		if err != nil {
			return
		}
		if len(*t) != int(sliceLen) {
			*t = make([]G2Affine, sliceLen)
		}
		compressed := make([]bool, sliceLen)
		for i := 0; i < len(*t); i++ {

			// we start by reading the compressed point size; if metadata tells us it is uncompressed, we read more.
			read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed])
			dec.n += int64(read)
			if err != nil {
				return
			}
			nbBytes := SizeOfG2AffineCompressed

			// 111, 011, 001 --> invalid mask
			if isMaskInvalid(buf[0]) {
				err = ErrInvalidEncoding
				return
			}

			// most significant byte contains metadata
			if !isCompressed(buf[0]) {
				nbBytes = SizeOfG2AffineUncompressed
				// we read more.
				read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed])
				dec.n += int64(read)
				if err != nil {
					return
				}
				_, err = (*t)[i].setBytes(buf[:nbBytes], false)
				if err != nil {
					return
				}
			} else {
				var r bool
				if r, err = (*t)[i].unsafeSetCompressedBytes(buf[:nbBytes]); err != nil {
					return
				}
				compressed[i] = !r
			}
		}
		var nbErrs uint64
		parallel.Execute(len(compressed), func(start, end int) {
			for i := start; i < end; i++ {
				if compressed[i] {
					if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil {
						atomic.AddUint64(&nbErrs, 1)
					}
				} else if dec.subGroupCheck {
					if !(*t)[i].IsInSubGroup() {
						atomic.AddUint64(&nbErrs, 1)
					}
				}
			}
		})
		if nbErrs != 0 {
			return errors.New("point decompression failed")
		}

		return nil
	default:
		n := binary.Size(t)
		if n == -1 {
			return errors.New("bls12-377 decoder: unsupported type")
		}
		err = binary.Read(dec.r, binary.BigEndian, t)
		if err == nil {
			dec.n += int64(n)
		}
		return
	}
}

// BytesRead returns the total number of bytes read from the reader
func (dec *Decoder) BytesRead() int64 {
	return dec.n
}

func (dec *Decoder) readUint32() (r uint32, err error) {
	var read int
	var buf [4]byte
	read, err = io.ReadFull(dec.r, buf[:4])
	dec.n += int64(read)
	if err != nil {
		return
	}
	r = binary.BigEndian.Uint32(buf[:4])
	return
}

func (dec *Decoder) readUint64() (r uint64, err error) {
	var read int
	var buf [8]byte
	read, err = io.ReadFull(dec.r, buf[:])
	dec.n += int64(read)
	if err != nil {
		return
	}
	r = binary.BigEndian.Uint64(buf[:])
	return
}

// isMaskInvalid returns true if the mask is invalid
func isMaskInvalid(msb byte) bool {
	mData := msb & mMask
	return ((mData == (0b111 << 5)) || (mData == (0b011 << 5)) || (mData == (0b001 << 5)))
}

func isCompressed(msb byte) bool {
	mData := msb & mMask
	return !((mData == mUncompressed) || (mData == mUncompressedInfinity))
}

// NewEncoder returns a binary encoder supporting curve bls12-377 objects
func NewEncoder(w io.Writer, options ...func(*Encoder)) *Encoder {
	// default settings
	enc := &Encoder{
		w:   w,
		n:   0,
		raw: false,
	}

	// handle options
	for _, option := range options {
		option(enc)
	}

	return enc
}

// Encode writes the binary encoding of v to the stream
// type must be uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, []G1Affine or []G2Affine
func (enc *Encoder) Encode(v interface{}) (err error) {
	if enc.raw {
		return enc.encodeRaw(v)
	}
	return enc.encode(v)
}

// BytesWritten returns the total number of bytes written to the writer
func (enc *Encoder) BytesWritten() int64 {
	return enc.n
}

// RawEncoding returns an option to use in NewEncoder(...) which sets raw encoding mode to true;
// points will not be compressed with this option
func RawEncoding() func(*Encoder) {
	return func(enc *Encoder) {
		enc.raw = true
	}
}
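
// NOTE: the helper below is an illustrative sketch, not part of the generated
// gnark-crypto API. It shows how RawEncoding switches the Encoder from the
// default compressed point format (48 bytes for G1) to the uncompressed one
// (96 bytes for G1), by writing the same point twice to w.
func encodeG1Both(w io.Writer, p *G1Affine) (int64, error) {
	enc := NewEncoder(w) // compressed encoding by default
	if err := enc.Encode(p); err != nil {
		return enc.BytesWritten(), err
	}
	raw := NewEncoder(w, RawEncoding()) // uncompressed (raw) encoding
	if err := raw.Encode(p); err != nil {
		return enc.BytesWritten() + raw.BytesWritten(), err
	}
	return enc.BytesWritten() + raw.BytesWritten(), nil
}
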
// NoSubgroupChecks returns an option to use in NewDecoder(...) which disables subgroup checks on the points
// the decoder will read. Use with caution, as crafted points from an untrusted source can lead to crypto-attacks.
func NoSubgroupChecks() func(*Decoder) {
	return func(dec *Decoder) {
		dec.subGroupCheck = false
	}
}

// isZeroed checks that the provided bytes are all zero
func isZeroed(firstByte byte, buf []byte) bool {
	if firstByte != 0 {
		return false
	}
	for _, b := range buf {
		if b != 0 {
			return false
		}
	}
	return true
}

func (enc *Encoder) encode(v interface{}) (err error) {
	rv := reflect.ValueOf(v)
	if v == nil || (rv.Kind() == reflect.Ptr && rv.IsNil()) {
		return errors.New("bls12-377 encoder: can't encode <nil>")
	}

	// implementation note: code is a bit verbose (abusing code generation), but minimizes allocations on the heap

	var written64 int64
	if vw, ok := v.(io.WriterTo); ok {
		written64, err = vw.WriteTo(enc.w)
		enc.n += written64
		return
	}

	var written int

	switch t := v.(type) {
	case []uint64:
		return enc.writeUint64Slice(t)
	case [][]uint64:
		return enc.writeUint64SliceSlice(t)
	case *fr.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *fp.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G1Affine:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G2Affine:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case fr.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case fp.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case []fr.Element:
		written64, err = (*fr.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case []fp.Element:
		written64, err = (*fp.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case [][]fr.Element:
		// write slice length
		if err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))); err != nil {
			return
		}
		enc.n += 4
		for i := range t {
			written64, err = (*fr.Vector)(&t[i]).WriteTo(enc.w)
			enc.n += written64
		}
		return
	case []G1Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG1AffineCompressed]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].Bytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	case []G2Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG2AffineCompressed]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].Bytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	default:
		n := binary.Size(t)
		if n == -1 {
			return errors.New("bls12-377 encoder: unsupported type")
		}
		err = binary.Write(enc.w, binary.BigEndian, t)
		enc.n += int64(n)
		return
	}
}
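
// NOTE: the helper below is an illustrative sketch, not part of the generated
// gnark-crypto API. It documents the wire format used by encode (above) and
// encodeRaw (below) for a []G1Affine: a big-endian uint32 length prefix followed
// by one fixed-size point encoding per element.
func encodedSizeG1Slice(n int, raw bool) int {
	pointSize := SizeOfG1AffineCompressed // 48 bytes per point with the default encoder
	if raw {
		pointSize = SizeOfG1AffineUncompressed // 96 bytes per point with RawEncoding()
	}
	return 4 + n*pointSize
}
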
func (enc *Encoder) encodeRaw(v interface{}) (err error) {
	rv := reflect.ValueOf(v)
	if v == nil || (rv.Kind() == reflect.Ptr && rv.IsNil()) {
		return errors.New("bls12-377 encoder: can't encode <nil>")
	}

	// implementation note: code is a bit verbose (abusing code generation), but minimizes allocations on the heap

	var written64 int64
	if vw, ok := v.(io.WriterTo); ok {
		written64, err = vw.WriteTo(enc.w)
		enc.n += written64
		return
	}

	var written int

	switch t := v.(type) {
	case []uint64:
		return enc.writeUint64Slice(t)
	case [][]uint64:
		return enc.writeUint64SliceSlice(t)
	case *fr.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *fp.Element:
		buf := t.Bytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G1Affine:
		buf := t.RawBytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case *G2Affine:
		buf := t.RawBytes()
		written, err = enc.w.Write(buf[:])
		enc.n += int64(written)
		return
	case fr.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case fp.Vector:
		written64, err = t.WriteTo(enc.w)
		enc.n += written64
		return
	case []fr.Element:
		written64, err = (*fr.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case []fp.Element:
		written64, err = (*fp.Vector)(&t).WriteTo(enc.w)
		enc.n += written64
		return
	case [][]fr.Element:
		// write slice length
		if err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))); err != nil {
			return
		}
		enc.n += 4
		for i := range t {
			written64, err = (*fr.Vector)(&t[i]).WriteTo(enc.w)
			enc.n += written64
		}
		return
	case []G1Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG1AffineUncompressed]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].RawBytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	case []G2Affine:
		// write slice length
		err = binary.Write(enc.w, binary.BigEndian, uint32(len(t)))
		if err != nil {
			return
		}
		enc.n += 4

		var buf [SizeOfG2AffineUncompressed]byte

		for i := 0; i < len(t); i++ {
			buf = t[i].RawBytes()
			written, err = enc.w.Write(buf[:])
			enc.n += int64(written)
			if err != nil {
				return
			}
		}
		return nil
	default:
		n := binary.Size(t)
		if n == -1 {
			return errors.New("bls12-377 encoder: unsupported type")
		}
		err = binary.Write(enc.w, binary.BigEndian, t)
		enc.n += int64(n)
		return
	}
}

func (enc *Encoder) writeUint64Slice(t []uint64) (err error) {
	if err = enc.writeUint32(uint32(len(t))); err != nil {
		return
	}
	for i := range t {
		if err = enc.writeUint64(t[i]); err != nil {
			return
		}
	}
	return nil
}

func (enc *Encoder) writeUint64SliceSlice(t [][]uint64) (err error) {
	if err = enc.writeUint32(uint32(len(t))); err != nil {
		return
	}
	for i := range t {
		if err = enc.writeUint32(uint32(len(t[i]))); err != nil {
			return
		}
		for j := range t[i] {
			if err = enc.writeUint64(t[i][j]); err != nil {
				return
			}
		}
	}
	return nil
}

func (enc *Encoder) writeUint64(a uint64) error {
	var buff [64 / 8]byte
	binary.BigEndian.PutUint64(buff[:], a)
	written, err := enc.w.Write(buff[:])
	enc.n += int64(written)
	return err
}

func (enc *Encoder) writeUint32(a uint32) error {
	var buff [32 / 8]byte
	binary.BigEndian.PutUint32(buff[:], a)
	written, err := enc.w.Write(buff[:])
	enc.n += int64(written)
	return err
}

// SizeOfG1AffineCompressed represents the size in bytes that a G1Affine needs in binary form, compressed
const SizeOfG1AffineCompressed = 48

// SizeOfG1AffineUncompressed represents the size in bytes that a G1Affine needs in binary form, uncompressed
const SizeOfG1AffineUncompressed = SizeOfG1AffineCompressed * 2

// Marshal converts p to a byte slice (without point compression)
func (p *G1Affine) Marshal() []byte {
	b := p.RawBytes()
	return b[:]
}

// Unmarshal is an alias for SetBytes()
func (p *G1Affine) Unmarshal(buf []byte) error {
	_, err := p.SetBytes(buf)
	return err
}

// Bytes returns the binary representation of p
// it stores the X coordinate in regular form together with metadata bits in the most significant byte
// we follow the BLS12-381 style encoding as specified in ZCash and now IETF
//
// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
//
// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero.
//
// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
func (p *G1Affine) Bytes() (res [SizeOfG1AffineCompressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {
		res[0] = mCompressedInfinity
		return
	}

	msbMask := mCompressedSmallest
	// compressed, we need to know if Y is lexicographically bigger than -Y
	// if p.Y ">" -p.Y
	if p.Y.LexicographicallyLargest() {
		msbMask = mCompressedLargest
	}

	// we store X and mask the most significant word with our metadata mask
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[0:0+fp.Bytes]), p.X)

	res[0] |= msbMask

	return
}

// RawBytes returns the binary representation of p (stores X and Y coordinates)
// see Bytes() for a compressed representation
func (p *G1Affine) RawBytes() (res [SizeOfG1AffineUncompressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {

		res[0] = mUncompressedInfinity

		return
	}

	// not compressed
	// we store the Y coordinate
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[48:48+fp.Bytes]), p.Y)

	// we store X and mask the most significant word with our metadata mask
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[0:0+fp.Bytes]), p.X)

	res[0] |= mUncompressed

	return
}

// SetBytes sets p from the binary representation in buf and returns the number of consumed bytes
//
// bytes in buf must match either RawBytes() or Bytes() output
//
// if buf is too short io.ErrShortBuffer is returned
//
// if buf contains a compressed representation (output from Bytes()) and we're unable to compute
// the Y coordinate (i.e. the square root doesn't exist) this function returns an error
//
// this checks that the resulting point is on the curve and in the correct subgroup
func (p *G1Affine) SetBytes(buf []byte) (int, error) {
	return p.setBytes(buf, true)
}
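
// NOTE: the helper below is an illustrative sketch, not part of the generated
// gnark-crypto API. It round-trips a point through the compressed form produced
// by Bytes and consumed by SetBytes, assuming p is a valid point of the correct
// subgroup (otherwise SetBytes returns an error).
func roundTripG1Compressed(p G1Affine) (G1Affine, error) {
	b := p.Bytes() // 48 bytes: metadata bits ORed into the first byte, then X
	var q G1Affine
	// SetBytes recomputes Y from X, selects the square root indicated by the
	// metadata bits, and runs the curve and subgroup checks.
	if _, err := q.SetBytes(b[:]); err != nil {
		return G1Affine{}, err
	}
	return q, nil
}
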
func (p *G1Affine) setBytes(buf []byte, subGroupCheck bool) (int, error) {
	if len(buf) < SizeOfG1AffineCompressed {
		return 0, io.ErrShortBuffer
	}

	// most significant byte
	mData := buf[0] & mMask

	// 111, 011, 001 --> invalid mask
	if isMaskInvalid(mData) {
		return 0, ErrInvalidEncoding
	}

	// check buffer size
	if (mData == mUncompressed) || (mData == mUncompressedInfinity) {
		if len(buf) < SizeOfG1AffineUncompressed {
			return 0, io.ErrShortBuffer
		}
	}

	// infinity encoded, we still check that the buffer is full of zeroes.
	if mData == mCompressedInfinity {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG1AffineCompressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOfG1AffineCompressed, nil
	}
	if mData == mUncompressedInfinity {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG1AffineUncompressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOfG1AffineUncompressed, nil
	}

	// uncompressed point
	if mData == mUncompressed {
		// read X and Y coordinates
		if err := p.X.SetBytesCanonical(buf[:fp.Bytes]); err != nil {
			return 0, err
		}
		if err := p.Y.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
			return 0, err
		}

		// subgroup check
		if subGroupCheck && !p.IsInSubGroup() {
			return 0, errors.New("invalid point: subgroup check failed")
		}

		return SizeOfG1AffineUncompressed, nil
	}

	// we have a compressed coordinate, so we need to
	// 	1. copy the buffer (to keep this method thread safe)
	// 	2. solve the curve equation to compute Y

	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	bufX[0] &= ^mMask

	// read X coordinate
	if err := p.X.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return 0, err
	}

	var YSquared, Y fp.Element

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &bCurveCoeff)
	if Y.Sqrt(&YSquared) == nil {
		return 0, errors.New("invalid compressed coordinate: square root doesn't exist")
	}

	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return 0, errors.New("invalid point: subgroup check failed")
	}

	return SizeOfG1AffineCompressed, nil
}

// unsafeComputeY is called by the Decoder when processing slices of compressed points in parallel (step 2)
// it computes the Y coordinate from the already set X coordinate and is compute intensive
func (p *G1Affine) unsafeComputeY(subGroupCheck bool) error {
	// stored in unsafeSetCompressedBytes

	mData := byte(p.Y[0])

	// we have a compressed coordinate, we need to solve the curve equation to compute Y
	var YSquared, Y fp.Element

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &bCurveCoeff)
	if Y.Sqrt(&YSquared) == nil {
		return errors.New("invalid compressed coordinate: square root doesn't exist")
	}

	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return errors.New("invalid point: subgroup check failed")
	}

	return nil
}

// unsafeSetCompressedBytes is called by the Decoder when processing slices of compressed points in parallel (step 1)
// assumes the mask in buf[0] is set to compressed
// returns true if the point is at infinity and needs no further processing
// it sets the X coordinate and uses Y as scratch space to store decompression metadata
func (p *G1Affine) unsafeSetCompressedBytes(buf []byte) (isInfinity bool, err error) {

	// read the most significant byte
	mData := buf[0] & mMask

	if mData == mCompressedInfinity {
		isInfinity = true
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG1AffineCompressed]) {
			return isInfinity, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return isInfinity, nil
	}

	// we need to copy the input buffer (to keep this method thread safe)
	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	bufX[0] &= ^mMask

	// read X coordinate
	if err := p.X.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return false, err
	}
	// store mData in p.Y[0]
	p.Y[0] = uint64(mData)

	// recomputing Y will be done asynchronously
	return isInfinity, nil
}

// SizeOfG2AffineCompressed represents the size in bytes that a G2Affine needs in binary form, compressed
const SizeOfG2AffineCompressed = 48 * 2

// SizeOfG2AffineUncompressed represents the size in bytes that a G2Affine needs in binary form, uncompressed
const SizeOfG2AffineUncompressed = SizeOfG2AffineCompressed * 2

// Marshal converts p to a byte slice (without point compression)
func (p *G2Affine) Marshal() []byte {
	b := p.RawBytes()
	return b[:]
}

// Unmarshal is an alias for SetBytes()
func (p *G2Affine) Unmarshal(buf []byte) error {
	_, err := p.SetBytes(buf)
	return err
}

// Bytes returns the binary representation of p
// it stores the X coordinate in regular form together with metadata bits in the most significant byte
// we follow the BLS12-381 style encoding as specified in ZCash and now IETF
//
// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
//
// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero.
//
// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
func (p *G2Affine) Bytes() (res [SizeOfG2AffineCompressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {
		res[0] = mCompressedInfinity
		return
	}

	msbMask := mCompressedSmallest
	// compressed, we need to know if Y is lexicographically bigger than -Y
	// if p.Y ">" -p.Y
	if p.Y.LexicographicallyLargest() {
		msbMask = mCompressedLargest
	}

	// we store X and mask the most significant word with our metadata mask
	// p.X.A1 | p.X.A0
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[48:48+fp.Bytes]), p.X.A0)
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[0:0+fp.Bytes]), p.X.A1)

	res[0] |= msbMask

	return
}

// RawBytes returns the binary representation of p (stores X and Y coordinates)
// see Bytes() for a compressed representation
func (p *G2Affine) RawBytes() (res [SizeOfG2AffineUncompressed]byte) {

	// check if p is infinity point
	if p.X.IsZero() && p.Y.IsZero() {

		res[0] = mUncompressedInfinity

		return
	}

	// not compressed
	// we store the Y coordinate
	// p.Y.A1 | p.Y.A0
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[144:144+fp.Bytes]), p.Y.A0)
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[96:96+fp.Bytes]), p.Y.A1)

	// we store X and mask the most significant word with our metadata mask
	// p.X.A1 | p.X.A0
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[0:0+fp.Bytes]), p.X.A1)
	fp.BigEndian.PutElement((*[fp.Bytes]byte)(res[48:48+fp.Bytes]), p.X.A0)

	res[0] |= mUncompressed

	return
}

// SetBytes sets p from the binary representation in buf and returns the number of consumed bytes
//
// bytes in buf must match either RawBytes() or Bytes() output
//
// if buf is too short io.ErrShortBuffer is returned
//
// if buf contains a compressed representation (output from Bytes()) and we're unable to compute
// the Y coordinate (i.e. the square root doesn't exist) this function returns an error
//
// this checks that the resulting point is on the curve and in the correct subgroup
func (p *G2Affine) SetBytes(buf []byte) (int, error) {
	return p.setBytes(buf, true)
}

func (p *G2Affine) setBytes(buf []byte, subGroupCheck bool) (int, error) {
	if len(buf) < SizeOfG2AffineCompressed {
		return 0, io.ErrShortBuffer
	}

	// most significant byte
	mData := buf[0] & mMask

	// 111, 011, 001 --> invalid mask
	if isMaskInvalid(mData) {
		return 0, ErrInvalidEncoding
	}

	// check buffer size
	if (mData == mUncompressed) || (mData == mUncompressedInfinity) {
		if len(buf) < SizeOfG2AffineUncompressed {
			return 0, io.ErrShortBuffer
		}
	}

	// infinity encoded, we still check that the buffer is full of zeroes.
	if mData == mCompressedInfinity {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG2AffineCompressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOfG2AffineCompressed, nil
	}
	if mData == mUncompressedInfinity {
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG2AffineUncompressed]) {
			return 0, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return SizeOfG2AffineUncompressed, nil
	}

	// uncompressed point
	if mData == mUncompressed {
		// read X and Y coordinates
		// p.X.A1 | p.X.A0
		if err := p.X.A1.SetBytesCanonical(buf[:fp.Bytes]); err != nil {
			return 0, err
		}
		if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
			return 0, err
		}
		// p.Y.A1 | p.Y.A0
		if err := p.Y.A1.SetBytesCanonical(buf[fp.Bytes*2 : fp.Bytes*3]); err != nil {
			return 0, err
		}
		if err := p.Y.A0.SetBytesCanonical(buf[fp.Bytes*3 : fp.Bytes*4]); err != nil {
			return 0, err
		}

		// subgroup check
		if subGroupCheck && !p.IsInSubGroup() {
			return 0, errors.New("invalid point: subgroup check failed")
		}

		return SizeOfG2AffineUncompressed, nil
	}

	// we have a compressed coordinate, so we need to
	// 	1. copy the buffer (to keep this method thread safe)
	// 	2. solve the curve equation to compute Y

	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	bufX[0] &= ^mMask

	// read X coordinate
	// p.X.A1 | p.X.A0
	if err := p.X.A1.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return 0, err
	}
	if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
		return 0, err
	}

	var YSquared, Y fptower.E2

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &bTwistCurveCoeff)
	if YSquared.Legendre() == -1 {
		return 0, errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	Y.Sqrt(&YSquared)

	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return 0, errors.New("invalid point: subgroup check failed")
	}

	return SizeOfG2AffineCompressed, nil
}

// unsafeComputeY is called by the Decoder when processing slices of compressed points in parallel (step 2)
// it computes the Y coordinate from the already set X coordinate and is compute intensive
func (p *G2Affine) unsafeComputeY(subGroupCheck bool) error {
	// stored in unsafeSetCompressedBytes

	mData := byte(p.Y.A0[0])

	// we have a compressed coordinate, we need to solve the curve equation to compute Y
	var YSquared, Y fptower.E2

	YSquared.Square(&p.X).Mul(&YSquared, &p.X)
	YSquared.Add(&YSquared, &bTwistCurveCoeff)
	if YSquared.Legendre() == -1 {
		return errors.New("invalid compressed coordinate: square root doesn't exist")
	}
	Y.Sqrt(&YSquared)

	if Y.LexicographicallyLargest() {
		// Y ">" -Y
		if mData == mCompressedSmallest {
			Y.Neg(&Y)
		}
	} else {
		// Y "<=" -Y
		if mData == mCompressedLargest {
			Y.Neg(&Y)
		}
	}

	p.Y = Y

	// subgroup check
	if subGroupCheck && !p.IsInSubGroup() {
		return errors.New("invalid point: subgroup check failed")
	}

	return nil
}

// unsafeSetCompressedBytes is called by the Decoder when processing slices of compressed points in parallel (step 1)
// assumes the mask in buf[0] is set to compressed
// returns true if the point is at infinity and needs no further processing
// it sets the X coordinate and uses Y as scratch space to store decompression metadata
func (p *G2Affine) unsafeSetCompressedBytes(buf []byte) (isInfinity bool, err error) {

	// read the most significant byte
	mData := buf[0] & mMask

	if mData == mCompressedInfinity {
		isInfinity = true
		if !isZeroed(buf[0] & ^mMask, buf[1:SizeOfG2AffineCompressed]) {
			return isInfinity, ErrInvalidInfinityEncoding
		}
		p.X.SetZero()
		p.Y.SetZero()
		return isInfinity, nil
	}

	// we need to copy the input buffer (to keep this method thread safe)
	var bufX [fp.Bytes]byte
	copy(bufX[:fp.Bytes], buf[:fp.Bytes])
	bufX[0] &= ^mMask

	// read X coordinate
	// p.X.A1 | p.X.A0
	if err := p.X.A1.SetBytesCanonical(bufX[:fp.Bytes]); err != nil {
		return false, err
	}
	if err := p.X.A0.SetBytesCanonical(buf[fp.Bytes : fp.Bytes*2]); err != nil {
		return false, err
	}

	// store mData in p.Y.A0[0]
	p.Y.A0[0] = uint64(mData)

	// recomputing Y will be done asynchronously
	return isInfinity, nil
}
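
// NOTE: the helper below is an illustrative sketch, not part of the generated
// gnark-crypto API. It documents the byte layout of an uncompressed G2 encoding
// as produced by RawBytes: X.A1 | X.A0 | Y.A1 | Y.A0, each coordinate taking
// fp.Bytes (48) bytes; the compressed form stores only the two X chunks, with
// the metadata bits ORed into the first byte.
func splitG2RawEncoding(buf [SizeOfG2AffineUncompressed]byte) (xA1, xA0, yA1, yA0 []byte) {
	xA1 = buf[0:fp.Bytes]
	xA0 = buf[fp.Bytes : 2*fp.Bytes]
	yA1 = buf[2*fp.Bytes : 3*fp.Bytes]
	yA0 = buf[3*fp.Bytes : 4*fp.Bytes]
	return
}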