github.com/ava-labs/avalanchego@v1.11.11/codec/reflectcodec/type_codec.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package reflectcodec

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"reflect"
	"slices"

	"github.com/ava-labs/avalanchego/codec"
	"github.com/ava-labs/avalanchego/utils/set"
	"github.com/ava-labs/avalanchego/utils/wrappers"
)

const (
	// DefaultTagName is the struct tag name that enables serialization.
	DefaultTagName  = "serialize"
	initialSliceLen = 16
)

var (
	_ codec.Codec = (*genericCodec)(nil)

	errNeedPointer             = errors.New("argument to unmarshal must be a pointer")
	errRecursiveInterfaceTypes = errors.New("recursive interface types")
)

type TypeCodec interface {
	// UnpackPrefix unpacks the prefix of an interface from the given packer.
	// The prefix specifies the concrete type that the interface should be
	// deserialized into. This function returns a new instance of that concrete
	// type. The concrete type must implement the given type.
	UnpackPrefix(*wrappers.Packer, reflect.Type) (reflect.Value, error)

	// PackPrefix packs the prefix for the given type into the given packer.
	// This identifies the bytes that follow, which are the byte representation
	// of an interface, as having the given concrete type.
	// When deserializing the bytes, the prefix specifies which concrete type
	// to deserialize into.
	PackPrefix(*wrappers.Packer, reflect.Type) error

	// PrefixSize returns the length of the prefix that PackPrefix would write
	// for the given type.
	PrefixSize(reflect.Type) int
}

// genericCodec handles marshaling and unmarshaling of structs with a generic
// implementation for interface encoding.
//
// A few notes:
//
//  1. We use "marshal" and "serialize" interchangeably, and "unmarshal" and
//     "deserialize" interchangeably.
//  2. To include a field of a struct in the serialized form, add the tag
//     `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`.
//  3. Struct fields of these types may be serialized:
//     bool, string, uint[8,16,32,64], int[8,16,32,64],
//     structs, slices, arrays, maps, interfaces.
//     Structs, slices, maps, and arrays can only be serialized if their
//     constituent values can be.
//  4. To marshal an interface, you must pass a pointer to the value.
//  5. To unmarshal an interface, you must call
//     codec.RegisterType([instance of the type that fulfills the interface]).
//  6. Serialized fields must be exported.
//  7. nil slices are marshaled as empty slices.
type genericCodec struct {
	typer   TypeCodec
	fielder StructFielder
}

// New returns a new, concurrency-safe codec
func New(typer TypeCodec, tagNames []string) codec.Codec {
	return &genericCodec{
		typer:   typer,
		fielder: NewStructFielder(tagNames),
	}
}

func (c *genericCodec) Size(value interface{}) (int, error) {
	if value == nil {
		return 0, codec.ErrMarshalNil
	}

	size, _, err := c.size(reflect.ValueOf(value), nil /*=typeStack*/)
	return size, err
}
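// Example (editor's illustrative sketch, not part of the original file): a
// struct opts fields into serialization with the default "serialize" tag, and
// a codec built with New can then size, marshal, and unmarshal it. The
// typeCodec parameter below is a placeholder for whatever TypeCodec
// implementation the caller supplies (for example, the one the sibling
// linearcodec package builds on); it is an assumption, not part of this file.
//
//	type inner struct {
//		Num uint32 `serialize:"true"`
//	}
//
//	type outer struct {
//		Name  string   `serialize:"true"`
//		Nums  []uint64 `serialize:"true"`
//		Inner inner    `serialize:"true"`
//	}
//
//	func roundTrip(typeCodec TypeCodec) error {
//		c := New(typeCodec, []string{DefaultTagName})
//		in := outer{Name: "x", Nums: []uint64{1, 2}, Inner: inner{Num: 3}}
//
//		size, err := c.Size(&in)
//		if err != nil {
//			return err
//		}
//		p := wrappers.Packer{MaxSize: size, Bytes: make([]byte, 0, size)}
//		if err := c.MarshalInto(&in, &p); err != nil {
//			return err
//		}
//
//		var out outer
//		return c.Unmarshal(p.Bytes, &out)
//	}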
// size returns the size of the value along with whether the value is constant
// sized.
func (c *genericCodec) size(
	value reflect.Value,
	typeStack set.Set[reflect.Type],
) (int, bool, error) {
	switch valueKind := value.Kind(); valueKind {
	case reflect.Uint8:
		return wrappers.ByteLen, true, nil
	case reflect.Int8:
		return wrappers.ByteLen, true, nil
	case reflect.Uint16:
		return wrappers.ShortLen, true, nil
	case reflect.Int16:
		return wrappers.ShortLen, true, nil
	case reflect.Uint32:
		return wrappers.IntLen, true, nil
	case reflect.Int32:
		return wrappers.IntLen, true, nil
	case reflect.Uint64:
		return wrappers.LongLen, true, nil
	case reflect.Int64:
		return wrappers.LongLen, true, nil
	case reflect.Bool:
		return wrappers.BoolLen, true, nil
	case reflect.String:
		return wrappers.StringLen(value.String()), false, nil
	case reflect.Ptr:
		if value.IsNil() {
			return 0, false, codec.ErrMarshalNil
		}

		return c.size(value.Elem(), typeStack)

	case reflect.Interface:
		if value.IsNil() {
			return 0, false, codec.ErrMarshalNil
		}

		underlyingValue := value.Interface()
		underlyingType := reflect.TypeOf(underlyingValue)
		if typeStack.Contains(underlyingType) {
			return 0, false, fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType)
		}
		typeStack.Add(underlyingType)

		prefixSize := c.typer.PrefixSize(underlyingType)
		valueSize, _, err := c.size(value.Elem(), typeStack)

		typeStack.Remove(underlyingType)
		return prefixSize + valueSize, false, err

	case reflect.Slice:
		numElts := value.Len()
		if numElts == 0 {
			return wrappers.IntLen, false, nil
		}

		size, constSize, err := c.size(value.Index(0), typeStack)
		if err != nil {
			return 0, false, err
		}

		if size == 0 {
			return 0, false, fmt.Errorf("can't marshal slice of zero length values: %w", codec.ErrMarshalZeroLength)
		}

		// For fixed-size types we manually calculate lengths rather than
		// processing each element separately to improve performance.
		if constSize {
			return wrappers.IntLen + numElts*size, false, nil
		}

		for i := 1; i < numElts; i++ {
			innerSize, _, err := c.size(value.Index(i), typeStack)
			if err != nil {
				return 0, false, err
			}
			size += innerSize
		}
		return wrappers.IntLen + size, false, nil

	case reflect.Array:
		numElts := value.Len()
		if numElts == 0 {
			return 0, true, nil
		}

		size, constSize, err := c.size(value.Index(0), typeStack)
		if err != nil {
			return 0, false, err
		}

		// For fixed-size types we manually calculate lengths rather than
		// processing each element separately to improve performance.
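		// Illustrative arithmetic (editor's addition, not in the original): a
		// [4]uint64 value has constant-size elements of wrappers.LongLen bytes
		// each, so this branch reports 4*wrappers.LongLen = 32 bytes. No
		// length prefix is counted because an array's length is part of its
		// type.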
		if constSize {
			return numElts * size, true, nil
		}

		for i := 1; i < numElts; i++ {
			innerSize, _, err := c.size(value.Index(i), typeStack)
			if err != nil {
				return 0, false, err
			}
			size += innerSize
		}
		return size, false, nil

	case reflect.Struct:
		serializedFields, err := c.fielder.GetSerializedFields(value.Type())
		if err != nil {
			return 0, false, err
		}

		var (
			size      int
			constSize = true
		)
		for _, fieldIndex := range serializedFields {
			innerSize, innerConstSize, err := c.size(value.Field(fieldIndex), typeStack)
			if err != nil {
				return 0, false, err
			}
			size += innerSize
			constSize = constSize && innerConstSize
		}
		return size, constSize, nil

	case reflect.Map:
		iter := value.MapRange()
		if !iter.Next() {
			return wrappers.IntLen, false, nil
		}

		keySize, keyConstSize, err := c.size(iter.Key(), typeStack)
		if err != nil {
			return 0, false, err
		}
		valueSize, valueConstSize, err := c.size(iter.Value(), typeStack)
		if err != nil {
			return 0, false, err
		}

		if keySize == 0 && valueSize == 0 {
			return 0, false, fmt.Errorf("can't marshal map with zero length entries: %w", codec.ErrMarshalZeroLength)
		}

		switch {
		case keyConstSize && valueConstSize:
			numElts := value.Len()
			return wrappers.IntLen + numElts*(keySize+valueSize), false, nil
		case keyConstSize:
			var (
				numElts        = 1
				totalValueSize = valueSize
			)
			for iter.Next() {
				valueSize, _, err := c.size(iter.Value(), typeStack)
				if err != nil {
					return 0, false, err
				}
				totalValueSize += valueSize
				numElts++
			}
			return wrappers.IntLen + numElts*keySize + totalValueSize, false, nil
		case valueConstSize:
			var (
				numElts      = 1
				totalKeySize = keySize
			)
			for iter.Next() {
				keySize, _, err := c.size(iter.Key(), typeStack)
				if err != nil {
					return 0, false, err
				}
				totalKeySize += keySize
				numElts++
			}
			return wrappers.IntLen + totalKeySize + numElts*valueSize, false, nil
		default:
			totalSize := wrappers.IntLen + keySize + valueSize
			for iter.Next() {
				keySize, _, err := c.size(iter.Key(), typeStack)
				if err != nil {
					return 0, false, err
				}
				valueSize, _, err := c.size(iter.Value(), typeStack)
				if err != nil {
					return 0, false, err
				}
				totalSize += keySize + valueSize
			}
			return totalSize, false, nil
		}

	default:
		return 0, false, fmt.Errorf("can't evaluate marshal length of unknown kind %s", valueKind)
	}
}
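// Worked example for the map branch of size above (editor's illustration, not
// part of the original file): for a map[uint32]string, keys are constant-size
// (wrappers.IntLen) while values are not, so size takes the keyConstSize case
// and returns
//
//	wrappers.IntLen + numElts*wrappers.IntLen + sum(wrappers.StringLen(v))
//
// i.e. a 4-byte entry count, 4 bytes per key, and a 2-byte-length-prefixed
// encoding per string value.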
// To marshal an interface, [value] must be a pointer to the interface.
func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error {
	if value == nil {
		return codec.ErrMarshalNil
	}

	return c.marshal(reflect.ValueOf(value), p, nil /*=typeStack*/)
}

// marshal writes the byte representation of [value] to [p]
func (c *genericCodec) marshal(
	value reflect.Value,
	p *wrappers.Packer,
	typeStack set.Set[reflect.Type],
) error {
	switch valueKind := value.Kind(); valueKind {
	case reflect.Uint8:
		p.PackByte(uint8(value.Uint()))
		return p.Err
	case reflect.Int8:
		p.PackByte(uint8(value.Int()))
		return p.Err
	case reflect.Uint16:
		p.PackShort(uint16(value.Uint()))
		return p.Err
	case reflect.Int16:
		p.PackShort(uint16(value.Int()))
		return p.Err
	case reflect.Uint32:
		p.PackInt(uint32(value.Uint()))
		return p.Err
	case reflect.Int32:
		p.PackInt(uint32(value.Int()))
		return p.Err
	case reflect.Uint64:
		p.PackLong(value.Uint())
		return p.Err
	case reflect.Int64:
		p.PackLong(uint64(value.Int()))
		return p.Err
	case reflect.String:
		p.PackStr(value.String())
		return p.Err
	case reflect.Bool:
		p.PackBool(value.Bool())
		return p.Err
	case reflect.Ptr:
		if value.IsNil() {
			return codec.ErrMarshalNil
		}

		return c.marshal(value.Elem(), p, typeStack)
	case reflect.Interface:
		if value.IsNil() {
			return codec.ErrMarshalNil
		}

		underlyingValue := value.Interface()
		underlyingType := reflect.TypeOf(underlyingValue)
		if typeStack.Contains(underlyingType) {
			return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType)
		}
		typeStack.Add(underlyingType)
		if err := c.typer.PackPrefix(p, underlyingType); err != nil {
			return err
		}
		if err := c.marshal(value.Elem(), p, typeStack); err != nil {
			return err
		}
		typeStack.Remove(underlyingType)
		return p.Err
	case reflect.Slice:
		numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil.
		if numElts > math.MaxInt32 {
			return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d",
				codec.ErrMaxSliceLenExceeded,
				numElts,
				math.MaxInt32,
			)
		}
		p.PackInt(uint32(numElts)) // pack # elements
		if p.Err != nil {
			return p.Err
		}
		if numElts == 0 {
			// Returning here prevents execution of the (expensive) reflect
			// calls below which check if the slice is []byte and, if it is,
			// the call of value.Bytes()
			return nil
		}
		// If this is a slice of bytes, manually pack the bytes rather
		// than calling marshal on each byte. This improves performance.
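		// Concretely (editor's addition, not in the original): a []byte of
		// length n is encoded as a 4-byte big-endian length followed by the n
		// raw bytes, rather than n separate PackByte calls.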
		if elemKind := value.Type().Elem().Kind(); elemKind == reflect.Uint8 {
			p.PackFixedBytes(value.Bytes())
			return p.Err
		}
		for i := 0; i < numElts; i++ { // Process each element in the slice
			startOffset := p.Offset
			if err := c.marshal(value.Index(i), p, typeStack); err != nil {
				return err
			}
			if startOffset == p.Offset {
				return fmt.Errorf("couldn't marshal slice of zero length values: %w", codec.ErrMarshalZeroLength)
			}
		}
		return nil
	case reflect.Array:
		if elemKind := value.Type().Kind(); elemKind == reflect.Uint8 {
			sliceVal := value.Convert(reflect.TypeOf([]byte{}))
			p.PackFixedBytes(sliceVal.Bytes())
			return p.Err
		}
		numElts := value.Len()
		for i := 0; i < numElts; i++ { // Process each element in the array
			if err := c.marshal(value.Index(i), p, typeStack); err != nil {
				return err
			}
		}
		return nil
	case reflect.Struct:
		serializedFields, err := c.fielder.GetSerializedFields(value.Type())
		if err != nil {
			return err
		}
		for _, fieldIndex := range serializedFields { // Go through all fields of this struct that are serialized
			if err := c.marshal(value.Field(fieldIndex), p, typeStack); err != nil { // Serialize the field and write to byte array
				return err
			}
		}
		return nil
	case reflect.Map:
		keys := value.MapKeys()
		numElts := len(keys)
		if numElts > math.MaxInt32 {
			return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d",
				codec.ErrMaxSliceLenExceeded,
				numElts,
				math.MaxInt32,
			)
		}
		p.PackInt(uint32(numElts)) // pack # elements
		if p.Err != nil {
			return p.Err
		}

		// pack key-value pairs sorted by increasing key
		type keyTuple struct {
			key        reflect.Value
			startIndex int
			endIndex   int
		}

		sortedKeys := make([]keyTuple, len(keys))
		startOffset := p.Offset
		endOffset := p.Offset
		for i, key := range keys {
			if err := c.marshal(key, p, typeStack); err != nil {
				return err
			}
			if p.Err != nil {
				return fmt.Errorf("couldn't marshal map key %+v: %w ", key, p.Err)
			}
			sortedKeys[i] = keyTuple{
				key:        key,
				startIndex: endOffset,
				endIndex:   p.Offset,
			}
			endOffset = p.Offset
		}

		slices.SortFunc(sortedKeys, func(a, b keyTuple) int {
			aBytes := p.Bytes[a.startIndex:a.endIndex]
			bBytes := p.Bytes[b.startIndex:b.endIndex]
			return bytes.Compare(aBytes, bBytes)
		})

		allKeyBytes := slices.Clone(p.Bytes[startOffset:p.Offset])
		p.Offset = startOffset
		for _, key := range sortedKeys {
			keyStartOffset := p.Offset

			// pack key
			startIndex := key.startIndex - startOffset
			endIndex := key.endIndex - startOffset
			keyBytes := allKeyBytes[startIndex:endIndex]
			p.PackFixedBytes(keyBytes)
			if p.Err != nil {
				return p.Err
			}

			// serialize and pack value
			if err := c.marshal(value.MapIndex(key.key), p, typeStack); err != nil {
				return err
			}
			if keyStartOffset == p.Offset {
				return fmt.Errorf("couldn't marshal map with zero length entries: %w", codec.ErrMarshalZeroLength)
			}
		}

		return nil
	default:
		return fmt.Errorf("%w: %s", codec.ErrUnsupportedType, valueKind)
	}
}
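// Note on map encoding (editor's addition, not part of the original file): the
// Map case above makes map serialization deterministic by first packing every
// key, then re-emitting the key-value pairs ordered by bytes.Compare over the
// keys' byte representations. For example, map[string]uint64{"b": 2, "a": 1}
// always encodes the "a" entry before the "b" entry, because the packed bytes
// of "a" compare lower than those of "b". The unmarshal side below rejects any
// other ordering.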
// Unmarshal unmarshals [bytes] into [dest], where [dest] must be a pointer or
// interface.
func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error {
	if dest == nil {
		return codec.ErrUnmarshalNil
	}

	p := wrappers.Packer{
		Bytes: bytes,
	}
	destPtr := reflect.ValueOf(dest)
	if destPtr.Kind() != reflect.Ptr {
		return errNeedPointer
	}
	if err := c.unmarshal(&p, destPtr.Elem(), nil /*=typeStack*/); err != nil {
		return err
	}
	if p.Offset != len(bytes) {
		return fmt.Errorf("%w: read %d provided %d",
			codec.ErrExtraSpace,
			p.Offset,
			len(bytes),
		)
	}
	return nil
}

// Unmarshal from p.Bytes into [value]. [value] must be addressable.
func (c *genericCodec) unmarshal(
	p *wrappers.Packer,
	value reflect.Value,
	typeStack set.Set[reflect.Type],
) error {
	switch value.Kind() {
	case reflect.Uint8:
		value.SetUint(uint64(p.UnpackByte()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal uint8: %w", p.Err)
		}
		return nil
	case reflect.Int8:
		value.SetInt(int64(p.UnpackByte()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal int8: %w", p.Err)
		}
		return nil
	case reflect.Uint16:
		value.SetUint(uint64(p.UnpackShort()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal uint16: %w", p.Err)
		}
		return nil
	case reflect.Int16:
		value.SetInt(int64(p.UnpackShort()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal int16: %w", p.Err)
		}
		return nil
	case reflect.Uint32:
		value.SetUint(uint64(p.UnpackInt()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal uint32: %w", p.Err)
		}
		return nil
	case reflect.Int32:
		value.SetInt(int64(p.UnpackInt()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal int32: %w", p.Err)
		}
		return nil
	case reflect.Uint64:
		value.SetUint(p.UnpackLong())
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal uint64: %w", p.Err)
		}
		return nil
	case reflect.Int64:
		value.SetInt(int64(p.UnpackLong()))
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal int64: %w", p.Err)
		}
		return nil
	case reflect.Bool:
		value.SetBool(p.UnpackBool())
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal bool: %w", p.Err)
		}
		return nil
	case reflect.Slice:
		numElts32 := p.UnpackInt()
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal slice: %w", p.Err)
		}
		if numElts32 > math.MaxInt32 {
			return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d",
				codec.ErrMaxSliceLenExceeded,
				numElts32,
				math.MaxInt32,
			)
		}
		numElts := int(numElts32)

		sliceType := value.Type()
		innerType := sliceType.Elem()

		// If this is a slice of bytes, manually unpack the bytes rather
		// than calling unmarshal on each byte. This improves performance.
		if elemKind := innerType.Kind(); elemKind == reflect.Uint8 {
			value.SetBytes(p.UnpackFixedBytes(numElts))
			return p.Err
		}
		// Unmarshal each element and append it into the slice.
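		// Editor's note (not in the original): the slice is grown one element
		// at a time from a small initial capacity (initialSliceLen) instead of
		// being allocated at numElts up front, presumably so that an
		// attacker-controlled length prefix cannot force a huge allocation
		// before the corresponding bytes are actually consumed.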
		value.Set(reflect.MakeSlice(sliceType, 0, initialSliceLen))
		zeroValue := reflect.Zero(innerType)
		for i := 0; i < numElts; i++ {
			value.Set(reflect.Append(value, zeroValue))

			startOffset := p.Offset
			if err := c.unmarshal(p, value.Index(i), typeStack); err != nil {
				return err
			}
			if startOffset == p.Offset {
				return fmt.Errorf("couldn't unmarshal slice of zero length values: %w", codec.ErrUnmarshalZeroLength)
			}
		}
		return nil
	case reflect.Array:
		numElts := value.Len()
		if elemKind := value.Type().Elem().Kind(); elemKind == reflect.Uint8 {
			unpackedBytes := p.UnpackFixedBytes(numElts)
			if p.Errored() {
				return p.Err
			}
			// Get a slice to the underlying array value
			underlyingSlice := value.Slice(0, numElts).Interface().([]byte)
			copy(underlyingSlice, unpackedBytes)
			return nil
		}
		for i := 0; i < numElts; i++ {
			if err := c.unmarshal(p, value.Index(i), typeStack); err != nil {
				return err
			}
		}
		return nil
	case reflect.String:
		value.SetString(p.UnpackStr())
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal string: %w", p.Err)
		}
		return nil
	case reflect.Interface:
		intfImplementor, err := c.typer.UnpackPrefix(p, value.Type())
		if err != nil {
			return err
		}
		intfImplementorType := intfImplementor.Type()
		if typeStack.Contains(intfImplementorType) {
			return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, intfImplementorType)
		}
		typeStack.Add(intfImplementorType)

		// Unmarshal into the struct
		if err := c.unmarshal(p, intfImplementor, typeStack); err != nil {
			return err
		}

		typeStack.Remove(intfImplementorType)
		value.Set(intfImplementor)
		return nil
	case reflect.Struct:
		// Get indices of fields that will be unmarshaled into
		serializedFieldIndices, err := c.fielder.GetSerializedFields(value.Type())
		if err != nil {
			return fmt.Errorf("couldn't unmarshal struct: %w", err)
		}
		// Go through the fields and unmarshal into them
		for _, fieldIndex := range serializedFieldIndices {
			if err := c.unmarshal(p, value.Field(fieldIndex), typeStack); err != nil {
				return err
			}
		}
		return nil
	case reflect.Ptr:
		// Get the type this pointer points to
		t := value.Type().Elem()
		// Create a new pointer to a new value of the underlying type
		v := reflect.New(t)
		// Fill the value
		if err := c.unmarshal(p, v.Elem(), typeStack); err != nil {
			return err
		}
		// Assign to the top-level struct's member
		value.Set(v)
		return nil
	case reflect.Map:
		numElts32 := p.UnpackInt()
		if p.Err != nil {
			return fmt.Errorf("couldn't unmarshal map: %w", p.Err)
		}
		if numElts32 > math.MaxInt32 {
			return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d",
				codec.ErrMaxSliceLenExceeded,
				numElts32,
				math.MaxInt32,
			)
		}

		var (
			numElts      = int(numElts32)
			mapType      = value.Type()
			mapKeyType   = mapType.Key()
			mapValueType = mapType.Elem()
			prevKey      []byte
		)

		// Set [value] to be a new map of the appropriate type.
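		// Editor's note (not in the original): because the ordering check in
		// the loop below uses bytes.Compare(keyBytes, prevKey) <= 0, duplicate
		// keys are rejected along with out-of-order ones, so only the
		// canonical sorted, duplicate-free encoding produced by marshal
		// round-trips.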
		value.Set(reflect.MakeMap(mapType))

		for i := 0; i < numElts; i++ {
			mapKey := reflect.New(mapKeyType).Elem()

			keyStartOffset := p.Offset

			if err := c.unmarshal(p, mapKey, typeStack); err != nil {
				return err
			}

			// Get the key's byte representation and check that the new key is
			// actually bigger (according to bytes.Compare) than the previous
			// key.
			//
			// We do this to enforce that key-value pairs are sorted by
			// increasing key.
			keyBytes := p.Bytes[keyStartOffset:p.Offset]
			if i != 0 && bytes.Compare(keyBytes, prevKey) <= 0 {
				return fmt.Errorf("keys aren't sorted: (%s, %s)", prevKey, mapKey)
			}
			prevKey = keyBytes

			// Get the value
			mapValue := reflect.New(mapValueType).Elem()
			if err := c.unmarshal(p, mapValue, typeStack); err != nil {
				return err
			}
			if keyStartOffset == p.Offset {
				return fmt.Errorf("couldn't unmarshal map with zero length entries: %w", codec.ErrUnmarshalZeroLength)
			}

			// Assign the key-value pair in the map
			value.SetMapIndex(mapKey, mapValue)
		}

		return nil
	default:
		return fmt.Errorf("can't unmarshal unknown type %s", value.Kind().String())
	}
}
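// Worked wire-format example (editor's illustration, not part of the original
// file): marshaling
//
//	struct {
//		N uint16 `serialize:"true"`
//		B []byte `serialize:"true"`
//	}{N: 0x0102, B: []byte{0xAA}}
//
// writes 0x01 0x02 (the uint16), then 0x00 0x00 0x00 0x01 (the 4-byte slice
// length), then 0xAA, for 7 bytes in total, matching Size's
// wrappers.ShortLen + wrappers.IntLen + 1.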