inet.af/netstack@v0.0.0-20220214151720-7585b01ddccf/state/encode.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package state

import (
	"context"
	"reflect"
	"sort"

	"inet.af/netstack/state/wire"
)

// objectEncodeState tracks the type and identity of an object occupying a
// memory address range. This is the value type for addrSet, and the intrusive
// entry for the deferred list.
type objectEncodeState struct {
	// id is the assigned ID for this object.
	id objectID

	// obj is the object value. Note that this may be replaced if we
	// encounter an object that contains this object. When this happens (in
	// resolve), we will update existing references appropriately, below,
	// and defer a re-encoding of the object.
	obj reflect.Value

	// encoded is the encoded value of this object. Note that this may not
	// be up to date if this object is still in the deferred list.
	encoded wire.Object

	// how indicates whether this object should be encoded as a value. This
	// is used only for deferred encoding.
	how encodeStrategy

	// refs are the list of reference objects used by other objects
	// referring to this object. When the object is updated, these
	// references may be updated directly and automatically.
	refs []*wire.Ref

	deferredEntry
}

// encodeState is the state used for encoding.
//
// The encoding process constructs a representation of the in-memory graph of
// objects before a single object is serialized. This is done to ensure that
// all references can be fully disambiguated. See resolve for more details.
type encodeState struct {
	// ctx is the encode context.
	ctx context.Context

	// w is the output stream.
	w wire.Writer

	// types is the type database.
	types typeEncodeDatabase

	// lastID is the last allocated object ID.
	lastID objectID

	// values tracks the address ranges occupied by objects, along with the
	// types of these objects. This is used to locate pointer targets,
	// including pointers to fields within another type.
	//
	// Multiple objects may overlap in memory iff the larger object fully
	// contains the smaller one, and the type of the smaller object matches
	// a field or array element's type at the appropriate offset. An
	// arbitrary number of objects may be nested in this manner.
	//
	// Note that this does not track zero-sized objects; those are tracked
	// by zeroValues below.
	values addrSet

	// zeroValues tracks zero-sized objects.
	zeroValues map[reflect.Type]*objectEncodeState

	// deferred is the list of objects to be encoded.
	deferred deferredList

	// pendingTypes is the list of types to be serialized. Serialization
	// will occur when all objects have been encoded, but before pending is
	// serialized.
	pendingTypes []wire.Type

	// pending maps object IDs to objects to be serialized. Serialization
	// does not actually occur until the full object graph is computed.
	pending map[objectID]*objectEncodeState

	// encodedStructs maps reflect.Values representing structs to previous
	// encodings of those structs. This is necessary to avoid duplicate
	// calls to SaverLoader.StateSave() that may result in multiple calls
	// to Sink.SaveValue() for a given field, resulting in object
	// duplication.
	encodedStructs map[reflect.Value]*wire.Struct

	// stats tracks time data.
	stats Stats
}
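// Aside: a minimal sketch (not part of the original file) of the aliasing
// problem the values addrSet solves. Both pointers below hold the same
// address, so the encoder must emit a single object plus a field traversal
// rather than two independent objects:
//
//	type inner struct{ x uint64 }
//	type outer struct{ i inner }
//
//	var o outer
//	p1 := &o   // Covers o's full address range.
//	p2 := &o.i // Same address: o.i is o's first and only field.
//
// resolve must map both p1 and p2 to one objectEncodeState, with the second
// reference reaching o.i via a traversal (see traverse below).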
// isSameSizeParent returns true if child is a field value or element within
// parent. Only a struct or array can have a child value.
//
// isSameSizeParent deals with objects like this:
//
//	struct child {
//		// fields..
//	}
//
//	struct parent {
//		c child
//	}
//
//	var p parent
//	record(&p.c)
//
// Here, &p and &p.c occupy the exact same address range.
//
// Or like this:
//
//	struct child {
//		// fields
//	}
//
//	var arr [1]parent
//	record(&arr[0])
//
// Similarly, &arr[0] and &arr[0].c have the exact same address range.
//
// Precondition: parent and child must occupy the same memory.
func isSameSizeParent(parent reflect.Value, childType reflect.Type) bool {
	switch parent.Kind() {
	case reflect.Struct:
		for i := 0; i < parent.NumField(); i++ {
			field := parent.Field(i)
			if field.Type() == childType {
				return true
			}
			// Recurse through any intermediate types.
			if isSameSizeParent(field, childType) {
				return true
			}
			// Does it make sense to keep going if the first field
			// doesn't match? Yes, because there might be an
			// arbitrary number of zero-sized fields before we get
			// a match, and childType itself can be zero-sized.
		}
		return false
	case reflect.Array:
		// The only case where an array with more than one element can
		// return true is if childType is zero-sized. In such cases,
		// it's ambiguous which element contains the match since a
		// zero-sized child object fully fits in any of the zero-sized
		// elements in an array... However, since all elements are of
		// the same type, we only need to check one element.
		//
		// For non-zero-sized childTypes, parent.Len() must be 1, but a
		// combination of the precondition and an implicit comparison
		// between the array element size and childType ensures this.
		return parent.Len() > 0 && isSameSizeParent(parent.Index(0), childType)
	default:
		return false
	}
}

// nextID returns the next valid ID.
func (es *encodeState) nextID() objectID {
	es.lastID++
	return objectID(es.lastID)
}

// dummyAddr points to the dummy zero-sized address.
var dummyAddr = reflect.ValueOf(new(struct{})).Pointer()
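// Aside: a sketch (not part of the original file) of why a single dummyAddr
// sentinel suffices. In the current Go runtime (an implementation detail,
// not a language guarantee), every allocation of a zero-sized type returns
// the same base address, so one zeroValues entry per type is enough:
//
//	a := reflect.ValueOf(new(struct{})).Pointer()
//	b := reflect.ValueOf(new([0]int)).Pointer()
//	fmt.Println(a == b) // true with the current runtime.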
// resolve records the address range occupied by an object.
func (es *encodeState) resolve(obj reflect.Value, ref *wire.Ref) {
	addr := obj.Pointer()

	// Is this a map pointer? Just record the single address. It is not
	// possible to take any pointers into the map internals.
	if obj.Kind() == reflect.Map {
		if addr == 0 {
			// Just leave the nil reference alone. This is fine; we
			// may need to encode as a reference in this way. We
			// return nil for our objectEncodeState so that anyone
			// depending on this value knows there's nothing there.
			return
		}
		seg, gap := es.values.Find(addr)
		if seg.Ok() {
			// Ensure the map types match.
			existing := seg.Value()
			if existing.obj.Type() != obj.Type() {
				Failf("overlapping map objects at 0x%x: [new object] %#v [existing object type] %s", addr, obj, existing.obj)
			}

			// No sense recording refs; maps may not be replaced by
			// covering objects, they are maximal.
			ref.Root = wire.Uint(existing.id)
			return
		}

		// Record the map.
		r := addrRange{addr, addr + 1}
		oes := &objectEncodeState{
			id:  es.nextID(),
			obj: obj,
			how: encodeMapAsValue,
		}
		// Use Insert instead of InsertWithoutMergingUnchecked when race
		// detection is enabled to get additional sanity-checking from Merge.
		if !raceEnabled {
			es.values.InsertWithoutMergingUnchecked(gap, r, oes)
		} else {
			es.values.Insert(gap, r, oes)
		}
		es.pending[oes.id] = oes
		es.deferred.PushBack(oes)

		// See above: no ref recording.
		ref.Root = wire.Uint(oes.id)
		return
	}

	// If not a map, then the object must be a pointer.
	if obj.Kind() != reflect.Ptr {
		Failf("attempt to record non-map and non-pointer object %#v", obj)
	}

	obj = obj.Elem() // Value from here.

	// Is this a zero-sized type?
	typ := obj.Type()
	size := typ.Size()
	if size == 0 {
		if addr == dummyAddr {
			// Zero-sized objects point to a dummy byte within the
			// runtime. There's no sense recording this in the
			// address map. We add this to the dedicated
			// zeroValues.
			//
			// Note that zero-sized objects must be *true*
			// zero-sized objects. They cannot be part of some
			// larger object. In that case, they are assigned a
			// 1-byte address at the end of the object.
			oes, ok := es.zeroValues[typ]
			if !ok {
				oes = &objectEncodeState{
					id:  es.nextID(),
					obj: obj,
				}
				es.zeroValues[typ] = oes
				es.pending[oes.id] = oes
				es.deferred.PushBack(oes)
			}

			// There's also no sense tracking back references. We
			// know that this is a true zero-sized object, and not
			// part of a larger container, so it will not change.
			ref.Root = wire.Uint(oes.id)
			return
		}
		size = 1 // See above.
	}

	end := addr + size
	r := addrRange{addr, end}
	seg := es.values.LowerBoundSegment(addr)
	var (
		oes *objectEncodeState
		gap addrGapIterator
	)

	// Does at least one previously-registered object overlap this one?
	if seg.Ok() && seg.Start() < end {
		existing := seg.Value()

		if seg.Range() == r && typ == existing.obj.Type() {
			// This exact object is already registered. Avoid the
			// traversal and just return directly. We don't need to
			// encode the type information or any dots here.
			ref.Root = wire.Uint(existing.id)
			existing.refs = append(existing.refs, ref)
			return
		}

		if seg.Range().IsSupersetOf(r) && (seg.Range() != r || isSameSizeParent(existing.obj, typ)) {
			// This object is contained within a previously-registered
			// object. Perform traversal from the container to the new
			// object.
			ref.Root = wire.Uint(existing.id)
			ref.Dots = traverse(existing.obj.Type(), typ, seg.Start(), addr)
			ref.Type = es.findType(existing.obj.Type())
			existing.refs = append(existing.refs, ref)
			return
		}

		// This object contains one or more previously-registered objects.
		// Remove them and update existing references to use the new one.
		oes := &objectEncodeState{
			// Reuse the root ID of the first contained element.
			id:  existing.id,
			obj: obj,
		}
		type elementEncodeState struct {
			addr uintptr
			typ  reflect.Type
			refs []*wire.Ref
		}
		var (
			elems []elementEncodeState
			gap   addrGapIterator
		)
		for {
			// Each contained object should be completely contained
			// within this one.
			if raceEnabled && !r.IsSupersetOf(seg.Range()) {
				Failf("containing object %#v does not contain existing object %#v", obj, existing.obj)
			}
			elems = append(elems, elementEncodeState{
				addr: seg.Start(),
				typ:  existing.obj.Type(),
				refs: existing.refs,
			})
			delete(es.pending, existing.id)
			es.deferred.Remove(existing)
			gap = es.values.Remove(seg)
			seg = gap.NextSegment()
			if !seg.Ok() || seg.Start() >= end {
				break
			}
			existing = seg.Value()
		}
		wt := es.findType(typ)
		for _, elem := range elems {
			dots := traverse(typ, elem.typ, addr, elem.addr)
			for _, ref := range elem.refs {
				ref.Root = wire.Uint(oes.id)
				ref.Dots = append(ref.Dots, dots...)
				ref.Type = wt
			}
			oes.refs = append(oes.refs, elem.refs...)
		}
		// Finally, register the new containing object.
		if !raceEnabled {
			es.values.InsertWithoutMergingUnchecked(gap, r, oes)
		} else {
			es.values.Insert(gap, r, oes)
		}
		es.pending[oes.id] = oes
		es.deferred.PushBack(oes)
		ref.Root = wire.Uint(oes.id)
		oes.refs = append(oes.refs, ref)
		return
	}

	// No existing object overlaps this one. Register a new object.
	oes = &objectEncodeState{
		id:  es.nextID(),
		obj: obj,
	}
	if seg.Ok() {
		gap = seg.PrevGap()
	} else {
		gap = es.values.LastGap()
	}
	if !raceEnabled {
		es.values.InsertWithoutMergingUnchecked(gap, r, oes)
	} else {
		es.values.Insert(gap, r, oes)
	}
	es.pending[oes.id] = oes
	es.deferred.PushBack(oes)
	ref.Root = wire.Uint(oes.id)
	oes.refs = append(oes.refs, ref)
}
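// Aside: a sketch (not part of the original file) of the reference shape that
// resolve produces for the earlier outer/inner example, using the wire fields
// assigned above. The ID is hypothetical:
//
//	// Assume &o was registered first and received ID 1. A later pointer
//	// to o.i is then encoded as a traversal, not as a second object:
//	fieldName := wire.FieldName("i")
//	ref := &wire.Ref{
//		Root: wire.Uint(1),           // The containing object's ID.
//		Dots: []wire.Dot{&fieldName}, // Traversal, stored in reverse order.
//	}
//	// (resolve also sets ref.Type to the container's TypeSpec.)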
// traverse searches for a target object within a root object, where the target
// object is a struct field or array element within root, with potentially
// multiple intervening types. traverse returns the set of field or element
// traversals required to reach the target.
//
// Note that for efficiency, traverse returns the dots in reverse order. That
// is, the first traversal required will be the last element of the list.
//
// Precondition: The target object must lie completely within the range defined
// by [rootAddr, rootAddr + sizeof(rootType)].
func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) []wire.Dot {
	// Recursion base case: the types actually match.
	if targetType == rootType && targetAddr == rootAddr {
		return nil
	}

	switch rootType.Kind() {
	case reflect.Struct:
		offset := targetAddr - rootAddr
		for i := rootType.NumField(); i > 0; i-- {
			field := rootType.Field(i - 1)
			// The first field from the end with an offset that is
			// smaller than or equal to our address offset is where
			// the target is located. Traverse from there.
			if field.Offset <= offset {
				dots := traverse(field.Type, targetType, rootAddr+field.Offset, targetAddr)
				fieldName := wire.FieldName(field.Name)
				return append(dots, &fieldName)
			}
		}
		// Should never happen; the target should be reachable.
		Failf("no field in root type %v contains target type %v", rootType, targetType)

	case reflect.Array:
		// Since arrays have homogeneous types, all elements have the
		// same size and we can compute where the target lives. This
		// does not matter for the purpose of typing, but matters for
		// the purpose of computing the address of the given index.
		elemSize := int(rootType.Elem().Size())
		n := int(targetAddr-rootAddr) / elemSize // Relies on integer division rounding down.
		if rootType.Len() < n {
			Failf("traversal target of type %v @%x is beyond the end of the array type %v @%x with %v elements",
				targetType, targetAddr, rootType, rootAddr, rootType.Len())
		}
		dots := traverse(rootType.Elem(), targetType, rootAddr+uintptr(n*elemSize), targetAddr)
		return append(dots, wire.Index(n))

	default:
		// For any other type, there's no possibility of aliasing, so
		// if the types didn't match earlier then we have an address
		// collision, which shouldn't be possible at this point.
		Failf("traverse failed for root type %v and target type %v", rootType, targetType)
	}
	panic("unreachable")
}
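// Aside: a worked example (not part of the original file) of traverse's
// reverse ordering. Given:
//
//	type leaf struct{ x uint64 }
//	type root struct{ a [2]leaf }
//
// a target of &r.a[1].x yields
//
//	[]wire.Dot{&x, wire.Index(1), &a} // x and a are wire.FieldNames
//
// so a consumer walks the slice from the end: field "a", then index 1, then
// field "x".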
// encodeMap encodes a map.
func (es *encodeState) encodeMap(obj reflect.Value, dest *wire.Object) {
	if obj.IsNil() {
		// Because there is a difference between a nil map and an empty
		// map, we encode a truly nil map as wire.Nil so that it is not
		// decoded into an empty map.
		*dest = wire.Nil{}
		return
	}
	l := obj.Len()
	m := &wire.Map{
		Keys:   make([]wire.Object, l),
		Values: make([]wire.Object, l),
	}
	*dest = m
	for i, k := range obj.MapKeys() {
		v := obj.MapIndex(k)
		// Map keys must be encoded using the full value because the
		// type will be omitted after the first key.
		es.encodeObject(k, encodeAsValue, &m.Keys[i])
		es.encodeObject(v, encodeAsValue, &m.Values[i])
	}
}

// objectEncoder is for encoding structs.
type objectEncoder struct {
	// es is the encodeState.
	es *encodeState

	// encoded is the encoded struct.
	encoded *wire.Struct
}

// save is called by the public methods on Sink.
func (oe *objectEncoder) save(slot int, obj reflect.Value) {
	fieldValue := oe.encoded.Field(slot)
	oe.es.encodeObject(obj, encodeDefault, fieldValue)
}

// encodeStruct encodes a composite object.
func (es *encodeState) encodeStruct(obj reflect.Value, dest *wire.Object) {
	if s, ok := es.encodedStructs[obj]; ok {
		*dest = s
		return
	}
	s := &wire.Struct{}
	*dest = s
	es.encodedStructs[obj] = s

	// Ensure that obj is addressable. There are two cases when it is not:
	// first, when this is dispatched via SaveValue; second, when a struct
	// is used as a map key. Either way, we need to make a copy to obtain
	// an addressable value.
	if !obj.CanAddr() {
		localObj := reflect.New(obj.Type())
		localObj.Elem().Set(obj)
		obj = localObj.Elem()
	}

	// Look the type up in the database.
	te, ok := es.types.Lookup(obj.Type())
	if te == nil {
		if obj.NumField() == 0 {
			// Allow unregistered anonymous, empty structs. This
			// will just return success without ever invoking the
			// passed function. This uses the immutable EmptyStruct
			// variable to prevent an allocation in this case.
			//
			// Note that this mechanism does *not* work for
			// interfaces in general. So you can't dispatch
			// non-registered empty structs via interfaces because
			// then they can't be restored.
			s.Alloc(0)
			return
		}
		// We need a SaverLoader for struct types.
		Failf("struct %T does not implement SaverLoader", obj.Interface())
	}
	if !ok {
		// Queue the type to be serialized.
		es.pendingTypes = append(es.pendingTypes, te.Type)
	}

	// Invoke the provided saver.
	s.TypeID = wire.TypeID(te.ID)
	s.Alloc(len(te.Fields))
	oe := objectEncoder{
		es:      es,
		encoded: s,
	}
	es.stats.start(te.ID)
	defer es.stats.done()
	if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
		// Note: this may be a registered empty struct which does not
		// implement the saver/loader interfaces.
		sl.StateSave(Sink{internal: oe})
	}
}
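// Aside: a runnable sketch (not part of the original file) of the
// addressability workaround above. Values reached through a map are not
// addressable, so a copy must be made before Addr() is legal:
//
//	m := map[string]struct{ X int }{"k": {X: 1}}
//	v := reflect.ValueOf(m).MapIndex(reflect.ValueOf("k"))
//	fmt.Println(v.CanAddr()) // false: map values are not addressable.
//
//	local := reflect.New(v.Type())
//	local.Elem().Set(v)
//	fmt.Println(local.Elem().CanAddr()) // true: the copy is addressable.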
// encodeArray encodes an array.
func (es *encodeState) encodeArray(obj reflect.Value, dest *wire.Object) {
	l := obj.Len()
	a := &wire.Array{
		Contents: make([]wire.Object, l),
	}
	*dest = a
	for i := 0; i < l; i++ {
		// We need to encode the full value because arrays are encoded
		// using the type information from only the first element.
		es.encodeObject(obj.Index(i), encodeAsValue, &a.Contents[i])
	}
}

// findType recursively finds type information.
func (es *encodeState) findType(typ reflect.Type) wire.TypeSpec {
	// First: check if this is a proper type. It's possible for pointers,
	// slices, arrays, maps, etc. to all have some different type.
	te, ok := es.types.Lookup(typ)
	if te != nil {
		if !ok {
			// See encodeStruct.
			es.pendingTypes = append(es.pendingTypes, te.Type)
		}
		return wire.TypeID(te.ID)
	}

	switch typ.Kind() {
	case reflect.Ptr:
		return &wire.TypeSpecPointer{
			Type: es.findType(typ.Elem()),
		}
	case reflect.Slice:
		return &wire.TypeSpecSlice{
			Type: es.findType(typ.Elem()),
		}
	case reflect.Array:
		return &wire.TypeSpecArray{
			Count: wire.Uint(typ.Len()),
			Type:  es.findType(typ.Elem()),
		}
	case reflect.Map:
		return &wire.TypeSpecMap{
			Key:   es.findType(typ.Key()),
			Value: es.findType(typ.Elem()),
		}
	default:
		// After potentially chasing many pointers, the ultimate type
		// of the object is not known.
		Failf("type %q is not known", typ)
	}
	panic("unreachable")
}

// encodeInterface encodes an interface.
func (es *encodeState) encodeInterface(obj reflect.Value, dest *wire.Object) {
	// Dereference the object.
	obj = obj.Elem()
	if !obj.IsValid() {
		// Special case: the nil object.
		*dest = &wire.Interface{
			Type:  wire.TypeSpecNil{},
			Value: wire.Nil{},
		}
		return
	}

	// Encode the underlying object.
	i := &wire.Interface{
		Type: es.findType(obj.Type()),
	}
	*dest = i
	es.encodeObject(obj, encodeAsValue, &i.Value)
}
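// Aside (not part of the original file): a sketch of findType's recursion.
// Assuming a registered struct type T whose type entry has ID 7 (both
// hypothetical), an unregistered [4]*T resolves to the nested spec:
//
//	&wire.TypeSpecArray{
//		Count: wire.Uint(4),
//		Type:  &wire.TypeSpecPointer{Type: wire.TypeID(7)},
//	}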
// isPrimitiveZero returns true if this is a primitive object, or a composite
// object composed entirely of primitives.
func isPrimitiveZero(typ reflect.Type) bool {
	switch typ.Kind() {
	case reflect.Ptr:
		// Pointers are always treated as primitive types because we
		// won't encode directly from here. Returning true here won't
		// prevent the object from being encoded correctly.
		return true
	case reflect.Bool:
		return true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return true
	case reflect.Float32, reflect.Float64:
		return true
	case reflect.Complex64, reflect.Complex128:
		return true
	case reflect.String:
		return true
	case reflect.Slice:
		// The slice itself is a primitive, but not necessarily the
		// array that it points to. This is similar to a pointer.
		return true
	case reflect.Array:
		// We cannot treat an array as a primitive, because it may be
		// composed of structures or other things with side-effects.
		return isPrimitiveZero(typ.Elem())
	case reflect.Interface:
		// Since we know that this type is the zero type, the interface
		// value must be zero. Therefore this is primitive.
		return true
	case reflect.Struct:
		return false
	case reflect.Map:
		// The isPrimitiveZero function is called only on zero-types to
		// see if it's safe to serialize. Since a zero map has no
		// elements, it is safe to treat as a primitive.
		return true
	default:
		Failf("unknown type %q", typ.Name())
	}
	panic("unreachable")
}

// encodeStrategy is the strategy used for encodeObject.
type encodeStrategy int

const (
	// encodeDefault means types are encoded normally as references.
	encodeDefault encodeStrategy = iota

	// encodeAsValue means that types will never be short-circuited and
	// will always be encoded as a normal value.
	encodeAsValue

	// encodeMapAsValue means that even maps will be fully encoded.
	encodeMapAsValue
)
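// Aside (not part of the original file): how the strategies above interact
// with the zero shortcut at the top of encodeObject below, for a zero int:
//
//	encodeDefault -> wire.Nil{}  // isPrimitiveZero(typ) && obj.IsZero()
//	encodeAsValue -> wire.Int(0) // used for map keys, array elements,
//	                             // and interface values
//
// encodeMapAsValue (set by resolve when registering a map) additionally
// forces the full wire.Map encoding rather than a wire.Ref.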
// encodeObject encodes an object.
func (es *encodeState) encodeObject(obj reflect.Value, how encodeStrategy, dest *wire.Object) {
	if how == encodeDefault && isPrimitiveZero(obj.Type()) && obj.IsZero() {
		*dest = wire.Nil{}
		return
	}
	switch obj.Kind() {
	case reflect.Ptr: // Fast path: first.
		r := new(wire.Ref)
		*dest = r
		if obj.IsNil() {
			// May be in an array or elsewhere such that a value is
			// required. So we encode as a reference to the zero
			// object, which does not exist. Note that this has to
			// be handled correctly in the decode path as well.
			return
		}
		es.resolve(obj, r)
	case reflect.Bool:
		*dest = wire.Bool(obj.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		*dest = wire.Int(obj.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		*dest = wire.Uint(obj.Uint())
	case reflect.Float32:
		*dest = wire.Float32(obj.Float())
	case reflect.Float64:
		*dest = wire.Float64(obj.Float())
	case reflect.Complex64:
		c := wire.Complex64(obj.Complex())
		*dest = &c // Needs alloc.
	case reflect.Complex128:
		c := wire.Complex128(obj.Complex())
		*dest = &c // Needs alloc.
	case reflect.String:
		s := wire.String(obj.String())
		*dest = &s // Needs alloc.
	case reflect.Array:
		es.encodeArray(obj, dest)
	case reflect.Slice:
		s := &wire.Slice{
			Capacity: wire.Uint(obj.Cap()),
			Length:   wire.Uint(obj.Len()),
		}
		*dest = s
		// Note that we do need to provide a wire.Slice type here, as
		// how is not encodeDefault. If it were, a nil slice would have
		// been caught by the IsZero check above and encoded as
		// wire.Nil{}.
		if obj.IsNil() {
			return
		}
		// Slices need pointer resolution.
		es.resolve(arrayFromSlice(obj), &s.Ref)
	case reflect.Interface:
		es.encodeInterface(obj, dest)
	case reflect.Struct:
		es.encodeStruct(obj, dest)
	case reflect.Map:
		if how == encodeMapAsValue {
			es.encodeMap(obj, dest)
			return
		}
		r := new(wire.Ref)
		*dest = r
		es.resolve(obj, r)
	default:
		Failf("unknown object %#v", obj.Interface())
		panic("unreachable")
	}
}

// Save serializes the object graph rooted at obj.
func (es *encodeState) Save(obj reflect.Value) {
	es.stats.init()
	defer es.stats.fini(func(id typeID) string {
		return es.pendingTypes[id-1].Name
	})

	// Resolve the first object, which should queue a pile of additional
	// objects on the pending list. All queued objects should be fully
	// resolved, and we should be able to serialize after this call.
	var root wire.Ref
	es.resolve(obj.Addr(), &root)

	// Encode the graph.
	var oes *objectEncodeState
	if err := safely(func() {
		for oes = es.deferred.Front(); oes != nil; oes = es.deferred.Front() {
			// Remove and encode the object. Note that as a result
			// of this encoding, the object may be enqueued on the
			// deferred list yet again. That's expected, and why it
			// is removed first.
			es.deferred.Remove(oes)
			es.encodeObject(oes.obj, oes.how, &oes.encoded)
		}
	}); err != nil {
		// Include the object in the error message.
		Failf("encoding error at object %#v: %w", oes.obj.Interface(), err)
	}

	// Check that we have objects to serialize.
	if len(es.pending) == 0 {
		Failf("pending is empty?")
	}

	// Write the header with the number of objects.
	if err := WriteHeader(es.w, uint64(len(es.pending)), true); err != nil {
		Failf("error writing header: %w", err)
	}

	// Serialize all pending types and pending objects. Note that we don't
	// bother removing from this list as we walk it, because that just
	// wastes time. It will not change after this point.
	if err := safely(func() {
		for _, wt := range es.pendingTypes {
			// Encode the type.
			wire.Save(es.w, &wt)
		}
		// Emit objects in ID order.
		ids := make([]objectID, 0, len(es.pending))
		for id := range es.pending {
			ids = append(ids, id)
		}
		sort.Slice(ids, func(i, j int) bool {
			return ids[i] < ids[j]
		})
		for _, id := range ids {
			// Encode the ID.
			wire.Save(es.w, wire.Uint(id))
			// Marshal the object.
			oes := es.pending[id]
			wire.Save(es.w, oes.encoded)
		}
	}); err != nil {
		// Include the object and the error.
		Failf("error serializing object %#v: %w", oes.encoded, err)
	}
}
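// Aside (not part of the original file): the resulting stream layout, as
// produced by Save above and WriteHeader below:
//
//	SaveUint(len(pending) | objectFlag) // object-count header
//	wire.Type x len(pendingTypes)       // types, in discovery order
//	(wire.Uint(id), object) pairs       // objects, in ascending ID order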
// objectFlag indicates that the length is a count of objects, rather than a
// raw byte length. When this is set on a length header in the stream, it may
// be decoded appropriately.
const objectFlag uint64 = 1 << 63

// WriteHeader writes a header.
//
// Each object written to the statefile should be prefixed with a header. In
// order to generate statefiles that play nicely with debugging tools, raw
// writes should be prefixed with a header with object set to false and the
// appropriate length. This will allow tools to skip these regions.
func WriteHeader(w wire.Writer, length uint64, object bool) error {
	// Sanity check the length.
	if length&objectFlag != 0 {
		Failf("impossibly huge length: %d", length)
	}
	if object {
		length |= objectFlag
	}

	// Write a header.
	return safely(func() {
		wire.SaveUint(w, length)
	})
}

// deferredMapper is for the deferred list.
type deferredMapper struct{}

func (deferredMapper) linkerFor(oes *objectEncodeState) *deferredEntry { return &oes.deferredEntry }

// addrSetFunctions is used by addrSet.
type addrSetFunctions struct{}

func (addrSetFunctions) MinKey() uintptr {
	return 0
}

func (addrSetFunctions) MaxKey() uintptr {
	return ^uintptr(0)
}

func (addrSetFunctions) ClearValue(val **objectEncodeState) {
	*val = nil
}

func (addrSetFunctions) Merge(r1 addrRange, val1 *objectEncodeState, r2 addrRange, val2 *objectEncodeState) (*objectEncodeState, bool) {
	if val1.obj == val2.obj {
		// This should never happen. It would indicate that the same
		// object exists in two non-contiguous address ranges. Note
		// that this assertion can only be triggered if the race
		// detector is enabled.
		Failf("unexpected merge in addrSet @ %v and %v: %#v and %#v", r1, r2, val1.obj, val2.obj)
	}
	// Reject the merge.
	return val1, false
}

func (addrSetFunctions) Split(r addrRange, val *objectEncodeState, _ uintptr) (*objectEncodeState, *objectEncodeState) {
	// A split should never happen: we don't remove ranges.
	Failf("unexpected split in addrSet @ %v: %#v", r, val.obj)
	panic("unreachable")
}
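// Aside: a runnable sketch (not part of the original file) of the objectFlag
// arithmetic used by WriteHeader. A reader recovers both fields with the
// inverse masks:
//
//	header := uint64(42) | objectFlag // As written by WriteHeader(w, 42, true).
//	isObjectCount := header&objectFlag != 0
//	length := header &^ objectFlag
//	fmt.Println(isObjectCount, length) // true 42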