github.com/noisysockets/netstack@v0.6.0/pkg/state/encode.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package state
    16  
    17  import (
    18  	"context"
    19  	"io"
    20  	"reflect"
    21  	"sort"
    22  
    23  	"github.com/noisysockets/netstack/pkg/state/wire"
    24  )
    25  
     26  // objectEncodeState records the type and identity of an object occupying a
     27  // memory address range. This is the value type for addrSet, and the intrusive
     28  // entry for the deferred list.
    29  type objectEncodeState struct {
    30  	// id is the assigned ID for this object.
    31  	id objectID
    32  
    33  	// obj is the object value. Note that this may be replaced if we
    34  	// encounter an object that contains this object. When this happens (in
    35  	// resolve), we will update existing references appropriately, below,
    36  	// and defer a re-encoding of the object.
    37  	obj reflect.Value
    38  
    39  	// encoded is the encoded value of this object. Note that this may not
    40  	// be up to date if this object is still in the deferred list.
    41  	encoded wire.Object
    42  
     43  	// how indicates the encoding strategy for this object. This is used
     44  	// only for deferred encoding.
    45  	how encodeStrategy
    46  
    47  	// refs are the list of reference objects used by other objects
    48  	// referring to this object. When the object is updated, these
    49  	// references may be updated directly and automatically.
    50  	refs []*wire.Ref
    51  
    52  	deferredEntry
    53  }
    54  
    55  // encodeState is state used for encoding.
    56  //
    57  // The encoding process constructs a representation of the in-memory graph of
    58  // objects before a single object is serialized. This is done to ensure that
    59  // all references can be fully disambiguated. See resolve for more details.
    60  type encodeState struct {
    61  	// ctx is the encode context.
    62  	ctx context.Context
    63  
    64  	// w is the output stream.
    65  	w io.Writer
    66  
    67  	// types is the type database.
    68  	types typeEncodeDatabase
    69  
    70  	// lastID is the last allocated object ID.
    71  	lastID objectID
    72  
    73  	// values tracks the address ranges occupied by objects, along with the
    74  	// types of these objects. This is used to locate pointer targets,
    75  	// including pointers to fields within another type.
    76  	//
    77  	// Multiple objects may overlap in memory iff the larger object fully
    78  	// contains the smaller one, and the type of the smaller object matches
    79  	// a field or array element's type at the appropriate offset. An
    80  	// arbitrary number of objects may be nested in this manner.
    81  	//
     82  	// Note that this does not track zero-sized objects; those are tracked
     83  	// by zeroValues below.
    84  	values addrSet
    85  
    86  	// zeroValues tracks zero-sized objects.
    87  	zeroValues map[reflect.Type]*objectEncodeState
    88  
    89  	// deferred is the list of objects to be encoded.
    90  	deferred deferredList
    91  
    92  	// pendingTypes is the list of types to be serialized. Serialization
    93  	// will occur when all objects have been encoded, but before pending is
    94  	// serialized.
    95  	pendingTypes []wire.Type
    96  
    97  	// pending maps object IDs to objects to be serialized. Serialization does
    98  	// not actually occur until the full object graph is computed.
    99  	pending map[objectID]*objectEncodeState
   100  
   101  	// encodedStructs maps reflect.Values representing structs to previous
   102  	// encodings of those structs. This is necessary to avoid duplicate calls
   103  	// to SaverLoader.StateSave() that may result in multiple calls to
   104  	// Sink.SaveValue() for a given field, resulting in object duplication.
   105  	encodedStructs map[reflect.Value]*wire.Struct
   106  
   107  	// stats tracks time data.
   108  	stats Stats
   109  }
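
// Illustrative sketch, not part of the upstream file: the wiring of these
// fields lives elsewhere in the package, but a minimal setup, assuming the
// zero values of types, values, deferred, and stats are usable as-is and with
// hypothetical ctx, w, and rootPtr (a pointer to the root object) in scope,
// looks roughly like:
//
//	es := &encodeState{
//		ctx:            ctx,
//		w:              w,
//		zeroValues:     make(map[reflect.Type]*objectEncodeState),
//		pending:        make(map[objectID]*objectEncodeState),
//		encodedStructs: make(map[reflect.Value]*wire.Struct),
//	}
//	es.Save(reflect.ValueOf(rootPtr).Elem())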
   110  
   111  // isSameSizeParent returns true if child is a field value or element within
   112  // parent. Only a struct or array can have a child value.
   113  //
   114  // isSameSizeParent deals with objects like this:
   115  //
   116  //	struct child {
   117  //		// fields..
   118  //	}
   119  //
   120  //	struct parent {
   121  //		c child
   122  //	}
   123  //
    124  //	var p parent
    125  //	record(&p.c)
   126  //
   127  // Here, &p and &p.c occupy the exact same address range.
   128  //
    129  // Or like this, reusing the child and parent types above:
    130  //
    131  //	struct parent {
    132  //		c child
    133  //	}
    134  //
    135  //	var arr [1]parent
    136  //	record(&arr[0])
    137  //
    138  // Similarly, &arr[0] and &arr[0].c have the exact same address range.
   139  //
   140  // Precondition: parent and child must occupy the same memory.
   141  func isSameSizeParent(parent reflect.Value, childType reflect.Type) bool {
   142  	switch parent.Kind() {
   143  	case reflect.Struct:
   144  		for i := 0; i < parent.NumField(); i++ {
   145  			field := parent.Field(i)
   146  			if field.Type() == childType {
   147  				return true
   148  			}
   149  			// Recurse through any intermediate types.
   150  			if isSameSizeParent(field, childType) {
   151  				return true
   152  			}
   153  			// Does it make sense to keep going if the first field
   154  			// doesn't match? Yes, because there might be an
   155  			// arbitrary number of zero-sized fields before we get
   156  			// a match, and childType itself can be zero-sized.
   157  		}
   158  		return false
   159  	case reflect.Array:
    160  		// The only case where an array with more than one element can
    161  		// return true is when childType is zero-sized. In such cases,
    162  		// it's ambiguous which element contains the match since a
    163  		// zero-sized child object fully fits in any of the zero-sized
    164  		// elements in an array... However, since all elements are of
    165  		// the same type, we only need to check one element.
    166  		//
    167  		// For non-zero-sized childTypes, parent.Len() must be 1; the
    168  		// precondition together with an implicit comparison between the
    169  		// array element size and childType ensures this.
   170  		return parent.Len() > 0 && isSameSizeParent(parent.Index(0), childType)
   171  	default:
   172  		return false
   173  	}
   174  }
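
// Illustrative sketch, not part of the upstream file: with types like those in
// the comment above, &p and &p.c are numerically equal, so only the type can
// say which object a recorded pointer refers to. isSameSizeParent answers that
// question, recursing through intermediate types:
//
//	type child struct{ x uint64 }
//	type parent struct{ c child }
//
//	var p parent
//	isSameSizeParent(reflect.ValueOf(&p).Elem(), reflect.TypeOf(child{}))   // true: field c matches directly.
//	isSameSizeParent(reflect.ValueOf(&p).Elem(), reflect.TypeOf(uint64(0))) // true: matches p.c.x via child.
//	isSameSizeParent(reflect.ValueOf(&p).Elem(), reflect.TypeOf(int32(0)))  // false: no such field at this address.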
   175  
   176  // nextID returns the next valid ID.
   177  func (es *encodeState) nextID() objectID {
   178  	es.lastID++
   179  	return objectID(es.lastID)
   180  }
   181  
   182  // dummyAddr points to the dummy zero-sized address.
   183  var dummyAddr = reflect.ValueOf(new(struct{})).Pointer()
   184  
   185  // resolve records the address range occupied by an object.
   186  func (es *encodeState) resolve(obj reflect.Value, ref *wire.Ref) {
   187  	addr := obj.Pointer()
   188  
   189  	// Is this a map pointer? Just record the single address. It is not
   190  	// possible to take any pointers into the map internals.
   191  	if obj.Kind() == reflect.Map {
   192  		if addr == 0 {
    193  			// Just leave the nil reference alone. This is fine; we
    194  			// may need to encode it as a reference in this way. No
    195  			// objectEncodeState is recorded here, so anyone depending
    196  			// on this value knows there's nothing there.
   197  			return
   198  		}
   199  		seg, gap := es.values.Find(addr)
   200  		if seg.Ok() {
   201  			// Ensure the map types match.
   202  			existing := seg.Value()
   203  			if existing.obj.Type() != obj.Type() {
   204  				Failf("overlapping map objects at 0x%x: [new object] %#v [existing object type] %s", addr, obj, existing.obj)
   205  			}
   206  
    207  			// No sense recording refs: maps may not be replaced by
    208  			// covering objects, as they are maximal.
   209  			ref.Root = wire.Uint(existing.id)
   210  			return
   211  		}
   212  
   213  		// Record the map.
   214  		r := addrRange{addr, addr + 1}
   215  		oes := &objectEncodeState{
   216  			id:  es.nextID(),
   217  			obj: obj,
   218  			how: encodeMapAsValue,
   219  		}
   220  		// Use Insert instead of InsertWithoutMergingUnchecked when race
   221  		// detection is enabled to get additional sanity-checking from Merge.
   222  		if !raceEnabled {
   223  			es.values.InsertWithoutMergingUnchecked(gap, r, oes)
   224  		} else {
   225  			es.values.Insert(gap, r, oes)
   226  		}
   227  		es.pending[oes.id] = oes
   228  		es.deferred.PushBack(oes)
   229  
   230  		// See above: no ref recording.
   231  		ref.Root = wire.Uint(oes.id)
   232  		return
   233  	}
   234  
   235  	// If not a map, then the object must be a pointer.
   236  	if obj.Kind() != reflect.Ptr {
   237  		Failf("attempt to record non-map and non-pointer object %#v", obj)
   238  	}
   239  
   240  	obj = obj.Elem() // Value from here.
   241  
   242  	// Is this a zero-sized type?
   243  	typ := obj.Type()
   244  	size := typ.Size()
   245  	if size == 0 {
   246  		if addr == dummyAddr {
   247  			// Zero-sized objects point to a dummy byte within the
   248  			// runtime.  There's no sense recording this in the
   249  			// address map.  We add this to the dedicated
   250  			// zeroValues.
   251  			//
   252  			// Note that zero-sized objects must be *true*
   253  			// zero-sized objects. They cannot be part of some
   254  			// larger object. In that case, they are assigned a
   255  			// 1-byte address at the end of the object.
   256  			oes, ok := es.zeroValues[typ]
   257  			if !ok {
   258  				oes = &objectEncodeState{
   259  					id:  es.nextID(),
   260  					obj: obj,
   261  				}
   262  				es.zeroValues[typ] = oes
   263  				es.pending[oes.id] = oes
   264  				es.deferred.PushBack(oes)
   265  			}
   266  
   267  			// There's also no sense tracking back references. We
   268  			// know that this is a true zero-sized object, and not
   269  			// part of a larger container, so it will not change.
   270  			ref.Root = wire.Uint(oes.id)
   271  			return
   272  		}
   273  		size = 1 // See above.
   274  	}
   275  
   276  	end := addr + size
   277  	r := addrRange{addr, end}
   278  	seg := es.values.LowerBoundSegment(addr)
   279  	var (
   280  		oes *objectEncodeState
   281  		gap addrGapIterator
   282  	)
   283  
   284  	// Does at least one previously-registered object overlap this one?
   285  	if seg.Ok() && seg.Start() < end {
   286  		existing := seg.Value()
   287  
   288  		if seg.Range() == r && typ == existing.obj.Type() {
   289  			// This exact object is already registered. Avoid the traversal and
   290  			// just return directly. We don't need to encode the type
   291  			// information or any dots here.
   292  			ref.Root = wire.Uint(existing.id)
   293  			existing.refs = append(existing.refs, ref)
   294  			return
   295  		}
   296  
   297  		if seg.Range().IsSupersetOf(r) && (seg.Range() != r || isSameSizeParent(existing.obj, typ)) {
   298  			// This object is contained within a previously-registered object.
   299  			// Perform traversal from the container to the new object.
   300  			ref.Root = wire.Uint(existing.id)
   301  			ref.Dots = traverse(existing.obj.Type(), typ, seg.Start(), addr)
   302  			ref.Type = es.findType(existing.obj.Type())
   303  			existing.refs = append(existing.refs, ref)
   304  			return
   305  		}
   306  
   307  		// This object contains one or more previously-registered objects.
   308  		// Remove them and update existing references to use the new one.
   309  		oes := &objectEncodeState{
   310  			// Reuse the root ID of the first contained element.
   311  			id:  existing.id,
   312  			obj: obj,
   313  		}
   314  		type elementEncodeState struct {
   315  			addr uintptr
   316  			typ  reflect.Type
   317  			refs []*wire.Ref
   318  		}
   319  		var (
   320  			elems []elementEncodeState
   321  			gap   addrGapIterator
   322  		)
   323  		for {
   324  			// Each contained object should be completely contained within
   325  			// this one.
   326  			if raceEnabled && !r.IsSupersetOf(seg.Range()) {
   327  				Failf("containing object %#v does not contain existing object %#v", obj, existing.obj)
   328  			}
   329  			elems = append(elems, elementEncodeState{
   330  				addr: seg.Start(),
   331  				typ:  existing.obj.Type(),
   332  				refs: existing.refs,
   333  			})
   334  			delete(es.pending, existing.id)
   335  			es.deferred.Remove(existing)
   336  			gap = es.values.Remove(seg)
   337  			seg = gap.NextSegment()
   338  			if !seg.Ok() || seg.Start() >= end {
   339  				break
   340  			}
   341  			existing = seg.Value()
   342  		}
   343  		wt := es.findType(typ)
   344  		for _, elem := range elems {
   345  			dots := traverse(typ, elem.typ, addr, elem.addr)
   346  			for _, ref := range elem.refs {
   347  				ref.Root = wire.Uint(oes.id)
   348  				ref.Dots = append(ref.Dots, dots...)
   349  				ref.Type = wt
   350  			}
   351  			oes.refs = append(oes.refs, elem.refs...)
   352  		}
   353  		// Finally register the new containing object.
   354  		if !raceEnabled {
   355  			es.values.InsertWithoutMergingUnchecked(gap, r, oes)
   356  		} else {
   357  			es.values.Insert(gap, r, oes)
   358  		}
   359  		es.pending[oes.id] = oes
   360  		es.deferred.PushBack(oes)
   361  		ref.Root = wire.Uint(oes.id)
   362  		oes.refs = append(oes.refs, ref)
   363  		return
   364  	}
   365  
   366  	// No existing object overlaps this one. Register a new object.
   367  	oes = &objectEncodeState{
   368  		id:  es.nextID(),
   369  		obj: obj,
   370  	}
   371  	if seg.Ok() {
   372  		gap = seg.PrevGap()
   373  	} else {
   374  		gap = es.values.LastGap()
   375  	}
   376  	if !raceEnabled {
   377  		es.values.InsertWithoutMergingUnchecked(gap, r, oes)
   378  	} else {
   379  		es.values.Insert(gap, r, oes)
   380  	}
   381  	es.pending[oes.id] = oes
   382  	es.deferred.PushBack(oes)
   383  	ref.Root = wire.Uint(oes.id)
   384  	oes.refs = append(oes.refs, ref)
   385  }
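
// To illustrate the subsumption case above (sketch only, not part of the
// upstream file): if a pointer to a field is resolved before a pointer to its
// containing struct, the field's registration is absorbed and its existing
// wire.Ref is rewritten to point into the container:
//
//	type inner struct{ v uint64 }
//	type outer struct{ i inner }
//
//	var o outer
//	// resolve(&o.i, refA): o.i is registered with its own ID, say 1.
//	// resolve(&o, refB):   o covers o.i, so o reuses ID 1, refA is rewritten
//	//                      to Root=1, Dots=["i"], Type=outer, and refB gets
//	//                      Root=1 with no dots.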
   386  
   387  // traverse searches for a target object within a root object, where the target
   388  // object is a struct field or array element within root, with potentially
   389  // multiple intervening types. traverse returns the set of field or element
   390  // traversals required to reach the target.
   391  //
   392  // Note that for efficiency, traverse returns the dots in the reverse order.
   393  // That is, the first traversal required will be the last element of the list.
   394  //
   395  // Precondition: The target object must lie completely within the range defined
   396  // by [rootAddr, rootAddr + sizeof(rootType)].
   397  func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) []wire.Dot {
   398  	// Recursion base case: the types actually match.
   399  	if targetType == rootType && targetAddr == rootAddr {
   400  		return nil
   401  	}
   402  
   403  	switch rootType.Kind() {
   404  	case reflect.Struct:
   405  		offset := targetAddr - rootAddr
   406  		for i := rootType.NumField(); i > 0; i-- {
   407  			field := rootType.Field(i - 1)
   408  			// The first field from the end with an offset that is
   409  			// smaller than or equal to our address offset is where
   410  			// the target is located. Traverse from there.
   411  			if field.Offset <= offset {
   412  				dots := traverse(field.Type, targetType, rootAddr+field.Offset, targetAddr)
   413  				fieldName := wire.FieldName(field.Name)
   414  				return append(dots, &fieldName)
   415  			}
   416  		}
   417  		// Should never happen; the target should be reachable.
   418  		Failf("no field in root type %v contains target type %v", rootType, targetType)
   419  
   420  	case reflect.Array:
   421  		// Since arrays have homogeneous types, all elements have the
   422  		// same size and we can compute where the target lives. This
   423  		// does not matter for the purpose of typing, but matters for
   424  		// the purpose of computing the address of the given index.
   425  		elemSize := int(rootType.Elem().Size())
   426  		n := int(targetAddr-rootAddr) / elemSize // Relies on integer division rounding down.
   427  		if rootType.Len() < n {
   428  			Failf("traversal target of type %v @%x is beyond the end of the array type %v @%x with %v elements",
   429  				targetType, targetAddr, rootType, rootAddr, rootType.Len())
   430  		}
   431  		dots := traverse(rootType.Elem(), targetType, rootAddr+uintptr(n*elemSize), targetAddr)
   432  		return append(dots, wire.Index(n))
   433  
   434  	default:
   435  		// For any other type, there's no possibility of aliasing so if
   436  		// the types didn't match earlier then we have an address
   437  		// collision which shouldn't be possible at this point.
   438  		Failf("traverse failed for root type %v and target type %v", rootType, targetType)
   439  	}
   440  	panic("unreachable")
   441  }
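
// Sketch (illustration only, not part of the upstream file): for a target
// nested two levels deep, the dots come back innermost-first, so callers apply
// them in reverse:
//
//	type inner struct{ leaf uint64 }
//	type outer struct {
//		pad uint32
//		i   inner
//	}
//
//	var o outer
//	dots := traverse(reflect.TypeOf(o), reflect.TypeOf(o.i.leaf),
//		reflect.ValueOf(&o).Pointer(), reflect.ValueOf(&o.i.leaf).Pointer())
//	// dots is [FieldName("leaf"), FieldName("i")]: traverse "i" first, then "leaf".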
   442  
   443  // encodeMap encodes a map.
   444  func (es *encodeState) encodeMap(obj reflect.Value, dest *wire.Object) {
   445  	if obj.IsNil() {
    446  		// Because there is a difference between a nil map and an empty
    447  		// map, a truly nil map must be encoded as a nil value.
   448  		*dest = wire.Nil{}
   449  		return
   450  	}
   451  	l := obj.Len()
   452  	m := &wire.Map{
   453  		Keys:   make([]wire.Object, l),
   454  		Values: make([]wire.Object, l),
   455  	}
   456  	*dest = m
   457  	for i, k := range obj.MapKeys() {
   458  		v := obj.MapIndex(k)
   459  		// Map keys must be encoded using the full value because the
   460  		// type will be omitted after the first key.
   461  		es.encodeObject(k, encodeAsValue, &m.Keys[i])
   462  		es.encodeObject(v, encodeAsValue, &m.Values[i])
   463  	}
   464  }
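
// For illustration (sketch, not part of the upstream file): a nil map encodes
// as wire.Nil{}, while map[string]int{"a": 1} encoded as a value becomes
// roughly
//
//	&wire.Map{
//		Keys:   []wire.Object{ /* full encoding of "a" */ },
//		Values: []wire.Object{ /* full encoding of 1 */ },
//	}
//
// with keys and values paired by index, each encoded with encodeAsValue.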
   465  
   466  // objectEncoder is for encoding structs.
   467  type objectEncoder struct {
   468  	// es is encodeState.
   469  	es *encodeState
   470  
   471  	// encoded is the encoded struct.
   472  	encoded *wire.Struct
   473  }
   474  
   475  // save is called by the public methods on Sink.
   476  func (oe *objectEncoder) save(slot int, obj reflect.Value) {
   477  	fieldValue := oe.encoded.Field(slot)
   478  	oe.es.encodeObject(obj, encodeDefault, fieldValue)
   479  }
   480  
   481  // encodeStruct encodes a composite object.
   482  func (es *encodeState) encodeStruct(obj reflect.Value, dest *wire.Object) {
   483  	if s, ok := es.encodedStructs[obj]; ok {
   484  		*dest = s
   485  		return
   486  	}
   487  	s := &wire.Struct{}
   488  	*dest = s
   489  	es.encodedStructs[obj] = s
   490  
    491  	// Ensure that the obj is addressable. There are two cases when it is
    492  	// not: first, when this is dispatched via SaveValue; second, when a
    493  	// struct is used as a map key. Either way, we need to make a copy to
    494  	// obtain an addressable value.
   495  	if !obj.CanAddr() {
   496  		localObj := reflect.New(obj.Type())
   497  		localObj.Elem().Set(obj)
   498  		obj = localObj.Elem()
   499  	}
   500  
   501  	// Look the type up in the database.
   502  	te, ok := es.types.Lookup(obj.Type())
   503  	if te == nil {
   504  		if obj.NumField() == 0 {
    505  			// Allow unregistered anonymous, empty structs. This
    506  			// will just encode an empty struct without ever invoking
    507  			// a SaverLoader. This uses the immutable EmptyStruct
   508  			// variable to prevent an allocation in this case.
   509  			//
   510  			// Note that this mechanism does *not* work for
   511  			// interfaces in general. So you can't dispatch
   512  			// non-registered empty structs via interfaces because
   513  			// then they can't be restored.
   514  			s.Alloc(0)
   515  			return
   516  		}
   517  		// We need a SaverLoader for struct types.
   518  		Failf("struct %T does not implement SaverLoader", obj.Interface())
   519  	}
   520  	if !ok {
   521  		// Queue the type to be serialized.
   522  		es.pendingTypes = append(es.pendingTypes, te.Type)
   523  	}
   524  
   525  	// Invoke the provided saver.
   526  	s.TypeID = wire.TypeID(te.ID)
   527  	s.Alloc(len(te.Fields))
   528  	oe := objectEncoder{
   529  		es:      es,
   530  		encoded: s,
   531  	}
   532  	es.stats.start(te.ID)
   533  	defer es.stats.done()
   534  	if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
   535  		// Note: may be a registered empty struct which does not
   536  		// implement the saver/loader interfaces.
   537  		sl.StateSave(Sink{internal: oe})
   538  	}
   539  }
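
// For context (sketch only; assumes the Sink methods Save and SaveValue as in
// upstream gVisor's pkg/state, which may differ here): a registered type's
// StateSave drives the objectEncoder above one slot at a time, e.g.
//
//	func (f *foo) StateSave(s Sink) {
//		s.Save(0, &f.a) // forwards to objectEncoder.save(0, ...)
//		s.Save(1, &f.b)
//	}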
   540  
   541  // encodeArray encodes an array.
   542  func (es *encodeState) encodeArray(obj reflect.Value, dest *wire.Object) {
   543  	l := obj.Len()
   544  	a := &wire.Array{
   545  		Contents: make([]wire.Object, l),
   546  	}
   547  	*dest = a
   548  	for i := 0; i < l; i++ {
   549  		// We need to encode the full value because arrays are encoded
   550  		// using the type information from only the first element.
   551  		es.encodeObject(obj.Index(i), encodeAsValue, &a.Contents[i])
   552  	}
   553  }
   554  
   555  // findType recursively finds type information.
   556  func (es *encodeState) findType(typ reflect.Type) wire.TypeSpec {
    557  	// First: check if this is a registered type. It's possible for named
    558  	// pointer, slice, array, and map types to be registered in their own right.
   559  	te, ok := es.types.Lookup(typ)
   560  	if te != nil {
   561  		if !ok {
   562  			// See encodeStruct.
   563  			es.pendingTypes = append(es.pendingTypes, te.Type)
   564  		}
   565  		return wire.TypeID(te.ID)
   566  	}
   567  
   568  	switch typ.Kind() {
   569  	case reflect.Ptr:
   570  		return &wire.TypeSpecPointer{
   571  			Type: es.findType(typ.Elem()),
   572  		}
   573  	case reflect.Slice:
   574  		return &wire.TypeSpecSlice{
   575  			Type: es.findType(typ.Elem()),
   576  		}
   577  	case reflect.Array:
   578  		return &wire.TypeSpecArray{
   579  			Count: wire.Uint(typ.Len()),
   580  			Type:  es.findType(typ.Elem()),
   581  		}
   582  	case reflect.Map:
   583  		return &wire.TypeSpecMap{
   584  			Key:   es.findType(typ.Key()),
   585  			Value: es.findType(typ.Elem()),
   586  		}
   587  	default:
   588  		// After potentially chasing many pointers, the
   589  		// ultimate type of the object is not known.
   590  		Failf("type %q is not known", typ)
   591  	}
   592  	panic("unreachable")
   593  }
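
// Sketch (illustration only, not part of the upstream file): for an
// unregistered composite such as []*registeredType, findType builds a nested
// spec that bottoms out at the registered type's ID:
//
//	&wire.TypeSpecSlice{
//		Type: &wire.TypeSpecPointer{
//			Type: wire.TypeID(id), // ID of registeredType in the type database.
//		},
//	}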
   594  
   595  // encodeInterface encodes an interface.
   596  func (es *encodeState) encodeInterface(obj reflect.Value, dest *wire.Object) {
   597  	// Dereference the object.
   598  	obj = obj.Elem()
   599  	if !obj.IsValid() {
   600  		// Special case: the nil object.
   601  		*dest = &wire.Interface{
   602  			Type:  wire.TypeSpecNil{},
   603  			Value: wire.Nil{},
   604  		}
   605  		return
   606  	}
   607  
   608  	// Encode underlying object.
   609  	i := &wire.Interface{
   610  		Type: es.findType(obj.Type()),
   611  	}
   612  	*dest = i
   613  	es.encodeObject(obj, encodeAsValue, &i.Value)
   614  }
   615  
    616  // isPrimitiveZero returns true if this is a primitive object, or a composite
    617  // object composed entirely of primitives.
   618  func isPrimitiveZero(typ reflect.Type) bool {
   619  	switch typ.Kind() {
   620  	case reflect.Ptr:
   621  		// Pointers are always treated as primitive types because we
   622  		// won't encode directly from here. Returning true here won't
   623  		// prevent the object from being encoded correctly.
   624  		return true
   625  	case reflect.Bool:
   626  		return true
   627  	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
   628  		return true
   629  	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
   630  		return true
   631  	case reflect.Float32, reflect.Float64:
   632  		return true
   633  	case reflect.Complex64, reflect.Complex128:
   634  		return true
   635  	case reflect.String:
   636  		return true
   637  	case reflect.Slice:
    638  		// The slice itself is a primitive, but not necessarily the
    639  		// array that it points to. This is similar to a pointer.
   640  		return true
   641  	case reflect.Array:
    642  		// We cannot unconditionally treat an array as a primitive; it
    643  		// may be composed of structures or other things with side-effects.
   644  		return isPrimitiveZero(typ.Elem())
   645  	case reflect.Interface:
    646  		// Since we know that this type is the zero type, the interface
   647  		// value must be zero. Therefore this is primitive.
   648  		return true
   649  	case reflect.Struct:
   650  		return false
   651  	case reflect.Map:
    652  		// The isPrimitiveZero function is called only on zero values to
    653  		// see if it's safe to serialize them this way. Since a zero map
    654  		// has no elements, it is safe to treat as a primitive.
   655  		return true
   656  	default:
   657  		Failf("unknown type %q", typ.Name())
   658  	}
   659  	panic("unreachable")
   660  }
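
// A few illustrative cases (sketch only, not part of the upstream file):
//
//	isPrimitiveZero(reflect.TypeOf(""))            // true: strings are primitive.
//	isPrimitiveZero(reflect.TypeOf([4]int{}))      // true: array of primitive elements.
//	isPrimitiveZero(reflect.TypeOf(struct{}{}))    // false: structs are never primitive.
//	isPrimitiveZero(reflect.TypeOf(map[int]int{})) // true: a zero map has no elements.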
   661  
   662  // encodeStrategy is the strategy used for encodeObject.
   663  type encodeStrategy int
   664  
   665  const (
   666  	// encodeDefault means types are encoded normally as references.
   667  	encodeDefault encodeStrategy = iota
   668  
    669  	// encodeAsValue means that the object will never be short-circuited
    670  	// and will always be encoded as a full value.
   671  	encodeAsValue
   672  
   673  	// encodeMapAsValue means that even maps will be fully encoded.
   674  	encodeMapAsValue
   675  )
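
// As a quick summary of how these strategies are chosen below (sketch only,
// quoting simplified forms of calls from this file):
//
//	es.encodeObject(k, encodeAsValue, &m.Keys[i])     // map keys and values: full values.
//	es.encodeObject(obj.Index(i), encodeAsValue, ...) // array elements: full values.
//	oes := &objectEncodeState{how: encodeMapAsValue}  // maps registered by resolve.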
   676  
   677  // encodeObject encodes an object.
   678  func (es *encodeState) encodeObject(obj reflect.Value, how encodeStrategy, dest *wire.Object) {
   679  	if how == encodeDefault && isPrimitiveZero(obj.Type()) && obj.IsZero() {
   680  		*dest = wire.Nil{}
   681  		return
   682  	}
   683  	switch obj.Kind() {
   684  	case reflect.Ptr: // Fast path: first.
   685  		r := new(wire.Ref)
   686  		*dest = r
   687  		if obj.IsNil() {
   688  			// May be in an array or elsewhere such that a value is
   689  			// required. So we encode as a reference to the zero
   690  			// object, which does not exist. Note that this has to
   691  			// be handled correctly in the decode path as well.
   692  			return
   693  		}
   694  		es.resolve(obj, r)
   695  	case reflect.Bool:
   696  		*dest = wire.Bool(obj.Bool())
   697  	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
   698  		*dest = wire.Int(obj.Int())
   699  	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
   700  		*dest = wire.Uint(obj.Uint())
   701  	case reflect.Float32:
   702  		*dest = wire.Float32(obj.Float())
   703  	case reflect.Float64:
   704  		*dest = wire.Float64(obj.Float())
   705  	case reflect.Complex64:
   706  		c := wire.Complex64(obj.Complex())
   707  		*dest = &c // Needs alloc.
   708  	case reflect.Complex128:
   709  		c := wire.Complex128(obj.Complex())
   710  		*dest = &c // Needs alloc.
   711  	case reflect.String:
   712  		s := wire.String(obj.String())
   713  		*dest = &s // Needs alloc.
   714  	case reflect.Array:
   715  		es.encodeArray(obj, dest)
   716  	case reflect.Slice:
   717  		s := &wire.Slice{
   718  			Capacity: wire.Uint(obj.Cap()),
   719  			Length:   wire.Uint(obj.Len()),
   720  		}
   721  		*dest = s
    722  		// Note that even for a nil slice we provide a wire.Slice
    723  		// here: if how were encodeDefault, a nil slice would have
    724  		// been caught by the IsZero check above and encoded as just
    725  		// wire.Nil{}.
   726  		if obj.IsNil() {
   727  			return
   728  		}
   729  		// Slices need pointer resolution.
   730  		es.resolve(arrayFromSlice(obj), &s.Ref)
   731  	case reflect.Interface:
   732  		es.encodeInterface(obj, dest)
   733  	case reflect.Struct:
   734  		es.encodeStruct(obj, dest)
   735  	case reflect.Map:
   736  		if how == encodeMapAsValue {
   737  			es.encodeMap(obj, dest)
   738  			return
   739  		}
   740  		r := new(wire.Ref)
   741  		*dest = r
   742  		es.resolve(obj, r)
   743  	default:
   744  		Failf("unknown object %#v", obj.Interface())
   745  		panic("unreachable")
   746  	}
   747  }
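
// Sketch of the fast paths above (illustration only, not part of the upstream
// file; dest is a hypothetical wire.Object variable): with encodeDefault, a
// zero primitive collapses to wire.Nil{}, while a non-nil pointer becomes a
// wire.Ref that resolve fills in:
//
//	var x uint64 // zero value
//	es.encodeObject(reflect.ValueOf(x), encodeDefault, &dest)  // dest is wire.Nil{}.
//
//	y := uint64(7)
//	es.encodeObject(reflect.ValueOf(&y), encodeDefault, &dest) // dest is a *wire.Ref resolved to y's object.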
   748  
   749  // Save serializes the object graph rooted at obj.
   750  func (es *encodeState) Save(obj reflect.Value) {
   751  	es.stats.init()
   752  	defer es.stats.fini(func(id typeID) string {
   753  		return es.pendingTypes[id-1].Name
   754  	})
   755  
   756  	// Resolve the first object, which should queue a pile of additional
   757  	// objects on the pending list. All queued objects should be fully
   758  	// resolved, and we should be able to serialize after this call.
   759  	var root wire.Ref
   760  	es.resolve(obj.Addr(), &root)
   761  
   762  	// Encode the graph.
   763  	var oes *objectEncodeState
   764  	if err := safely(func() {
   765  		for oes = es.deferred.Front(); oes != nil; oes = es.deferred.Front() {
   766  			// Remove and encode the object. Note that as a result
   767  			// of this encoding, the object may be enqueued on the
   768  			// deferred list yet again. That's expected, and why it
   769  			// is removed first.
   770  			es.deferred.Remove(oes)
   771  			es.encodeObject(oes.obj, oes.how, &oes.encoded)
   772  		}
   773  	}); err != nil {
   774  		// Include the object in the error message.
   775  		Failf("encoding error at object %#v: %w", oes.obj.Interface(), err)
   776  	}
   777  
   778  	// Check that we have objects to serialize.
   779  	if len(es.pending) == 0 {
   780  		Failf("pending is empty?")
   781  	}
   782  
   783  	// Write the header with the number of objects.
   784  	if err := WriteHeader(es.w, uint64(len(es.pending)), true); err != nil {
   785  		Failf("error writing header: %w", err)
   786  	}
   787  
   788  	// Serialize all pending types and pending objects. Note that we don't
   789  	// bother removing from this list as we walk it because that just
   790  	// wastes time. It will not change after this point.
   791  	if err := safely(func() {
   792  		for _, wt := range es.pendingTypes {
   793  			// Encode the type.
   794  			wire.Save(es.w, &wt)
   795  		}
   796  		// Emit objects in ID order.
   797  		ids := make([]objectID, 0, len(es.pending))
   798  		for id := range es.pending {
   799  			ids = append(ids, id)
   800  		}
   801  		sort.Slice(ids, func(i, j int) bool {
   802  			return ids[i] < ids[j]
   803  		})
   804  		for _, id := range ids {
   805  			// Encode the id.
   806  			wire.Save(es.w, wire.Uint(id))
   807  			// Marshal the object.
    808  			oes = es.pending[id] // Assign the outer oes so the error path below can name it.
   809  			wire.Save(es.w, oes.encoded)
   810  		}
   811  	}); err != nil {
   812  		// Include the object and the error.
   813  		Failf("error serializing object %#v: %w", oes.encoded, err)
   814  	}
   815  }
   816  
   817  // objectFlag indicates that the length is a # of objects, rather than a raw
   818  // byte length. When this is set on a length header in the stream, it may be
   819  // decoded appropriately.
   820  const objectFlag uint64 = 1 << 63
   821  
   822  // WriteHeader writes a header.
   823  //
   824  // Each object written to the statefile should be prefixed with a header. In
   825  // order to generate statefiles that play nicely with debugging tools, raw
   826  // writes should be prefixed with a header with object set to false and the
   827  // appropriate length. This will allow tools to skip these regions.
   828  func WriteHeader(w io.Writer, length uint64, object bool) error {
   829  	// Sanity check the length.
   830  	if length&objectFlag != 0 {
   831  		Failf("impossibly huge length: %d", length)
   832  	}
   833  	if object {
   834  		length |= objectFlag
   835  	}
   836  
   837  	// Write a header.
   838  	return safely(func() {
   839  		wire.SaveUint(w, length)
   840  	})
   841  }
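
// Usage sketch (illustration only, not part of the upstream file): Save above
// writes an object header for the pending set, while a raw byte region would
// use object=false so that tools can skip it:
//
//	_ = WriteHeader(w, uint64(numObjects), true) // length counts objects; bit 63 is set.
//	_ = WriteHeader(w, rawLen, false)            // length is a raw byte count.
//
// where numObjects and rawLen are hypothetical values supplied by the caller.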
   842  
   843  // addrSetFunctions is used by addrSet.
   844  type addrSetFunctions struct{}
   845  
   846  func (addrSetFunctions) MinKey() uintptr {
   847  	return 0
   848  }
   849  
   850  func (addrSetFunctions) MaxKey() uintptr {
   851  	return ^uintptr(0)
   852  }
   853  
   854  func (addrSetFunctions) ClearValue(val **objectEncodeState) {
   855  	*val = nil
   856  }
   857  
   858  func (addrSetFunctions) Merge(r1 addrRange, val1 *objectEncodeState, r2 addrRange, val2 *objectEncodeState) (*objectEncodeState, bool) {
   859  	if val1.obj == val2.obj {
    860  		// This should never happen. It would indicate that the same
   861  		// object exists in two non-contiguous address ranges. Note
   862  		// that this assertion can only be triggered if the race
   863  		// detector is enabled.
   864  		Failf("unexpected merge in addrSet @ %v and %v: %#v and %#v", r1, r2, val1.obj, val2.obj)
   865  	}
   866  	// Reject the merge.
   867  	return val1, false
   868  }
   869  
   870  func (addrSetFunctions) Split(r addrRange, val *objectEncodeState, _ uintptr) (*objectEncodeState, *objectEncodeState) {
   871  	// A split should never happen: we don't remove ranges.
   872  	Failf("unexpected split in addrSet @ %v: %#v", r, val.obj)
   873  	panic("unreachable")
   874  }