github.com/patricebensoussan/go/codec@v1.2.99/helper.go

     1  // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
     2  // Use of this source code is governed by a MIT license found in the LICENSE file.
     3  
     4  package codec
     5  
     6  // Contains code shared by both encode and decode.
     7  
     8  // Some shared ideas around encoding/decoding
     9  // ------------------------------------------
    10  //
    11  // If an interface{} is passed, we first do a type assertion to see if it is
    12  // a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
    13  //
    14  // If we start with a reflect.Value, we are already in reflect.Value land and
    15  // will try to grab the function for the underlying Type and directly call that function.
    16  // This is more performant than calling reflect.Value.Interface().
    17  //
     18  // This still helps us bypass many layers of reflection, giving the best performance.
    19  //
    20  // Containers
    21  // ------------
    22  // Containers in the stream are either associative arrays (key-value pairs) or
    23  // regular arrays (indexed by incrementing integers).
    24  //
    25  // Some streams support indefinite-length containers, and use a breaking
    26  // byte-sequence to denote that the container has come to an end.
    27  //
    28  // Some streams also are text-based, and use explicit separators to denote the
    29  // end/beginning of different values.
    30  //
    31  // Philosophy
    32  // ------------
    33  // On decode, this codec will update containers appropriately:
    34  //    - If struct, update fields from stream into fields of struct.
    35  //      If field in stream not found in struct, handle appropriately (based on option).
    36  //      If a struct field has no corresponding value in the stream, leave it AS IS.
    37  //      If nil in stream, set value to nil/zero value.
    38  //    - If map, update map from stream.
    39  //      If the stream value is NIL, set the map to nil.
    40  //    - If slice, try to update up to the length of the array in the stream.
    41  //      If the container length is less than the stream array length,
    42  //      and the container cannot be expanded, it is handled (based on option).
    43  //      This means you can decode a 4-element stream array into a 1-element array, as sketched below.
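        //      A hedged sketch (b and h are a hypothetical stream holding [1,2,3,4] and a configured Handle):
        //          var a [1]int
        //          _ = NewDecoderBytes(b, h).Decode(&a) // a becomes [1]; extra elements handled per option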
    44  //
    45  // ------------------------------------
    46  // On encode, a user can specify omitEmpty. This means that the value will be omitted
    47  // if it is the zero value. A problem may occur during decode, as omitted values do not affect
    48  // the value being decoded into. This means that if decoding into a struct with an
    49  // int field whose current value is 5, and the field is omitted in the stream, then after
    50  // decoding, the value will still be 5 (not 0).
    51  // omitEmpty only works if you guarantee that you always decode into zero-values.
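        //
        // For example (a hedged sketch; T, stream and h are hypothetical):
        //     type T struct { A int `codec:"a,omitempty"` }
        //     v := T{A: 5}
        //     _ = NewDecoderBytes(stream, h).Decode(&v) // stream encoded T{}, so field A was omitted
        //     // v.A is still 5 (not 0), since the stream carried no value for A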
    52  //
    53  // ------------------------------------
    54  // We could have truncated a map to remove keys not available in the stream,
    55  // or set values in the struct which are not in the stream to their zero values.
    56  // We decided against it because there is no efficient way to do it.
    57  // We may introduce it as an option later.
    58  // However, that will require enabling it for both runtime and code generation modes.
    59  //
    60  // To support truncate, we need to do 2 passes over the container:
    61  //   map
    62  //   - first collect all keys (e.g. in k1)
    63  //   - for each key in stream, mark k1 that the key should not be removed
    64  //   - after updating map, do second pass and call delete for all keys in k1 which are not marked
    65  //   struct:
    66  //   - for each field, track the *typeInfo s1
    67  //   - iterate through all s1, and for each one not marked, set value to zero
    68  //   - this involves checking the possible anonymous fields which are nil ptrs.
    69  //     too much work.
    70  //
    71  // ------------------------------------------
    72  // Error Handling is done within the library using panic.
    73  //
    74  // This way, the code doesn't have to keep checking if an error has happened,
    75  // and we don't have to keep sending the error value along with each call
    76  // or storing it in the En|Decoder and checking it constantly along the way.
    77  //
    78  // We considered storing the error in the En|Decoder.
    79  //   - once it has its err field set, it cannot be used again.
    80  //   - panicking would be optional, controlled by a const flag.
    81  //   - code should always check error first and return early.
    82  //
    83  // We eventually decided against it as it makes the code clumsier to always
    84  // check for these error conditions.
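        //
        // A conceptual sketch of the resulting pattern (not the exact implementation;
        // toError and mustEncode are hypothetical internals):
        //     func (e *Encoder) Encode(v interface{}) (err error) {
        //         defer func() {
        //             if x := recover(); x != nil {
        //                 err = toError(x) // convert the panic value to an error
        //             }
        //         }()
        //         e.mustEncode(v) // internals panic on error
        //         return
        //     }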
    85  //
    86  // ------------------------------------------
     87  // We use sync.Pool only to aid long-lived objects shared across multiple goroutines.
    88  // Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
    89  //
    90  // Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
    91  // Instead, the short-lived objects use free-lists that live as long as the object exists.
    92  //
    93  // ------------------------------------------
    94  // Performance is affected by the following:
    95  //    - Bounds Checking
    96  //    - Inlining
    97  //    - Pointer chasing
    98  // This package tries hard to manage the performance impact of these.
    99  //
   100  // ------------------------------------------
    101  // To alleviate the performance impact of pointer-chasing:
   102  //    - Prefer non-pointer values in a struct field
   103  //    - Refer to these directly within helper classes
   104  //      e.g. json.go refers directly to d.d.decRd
   105  //
   106  // We made the changes to embed En/Decoder in en/decDriver,
   107  // but we had to explicitly reference the fields as opposed to using a function
   108  // to get the better performance that we were looking for.
   109  // For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
   110  //
   111  // ------------------------------------------
   112  // Bounds Checking
   113  //    - Allow bytesDecReader to incur "bounds check error", and
   114  //      recover that as an io.EOF.
   115  //      This allows the bounds check branch to always be taken by the branch predictor,
   116  //      giving better performance (in theory), while ensuring that the code is shorter.
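        //
        //      A conceptual sketch (not the actual bytesDecReader code):
        //          func (z *myBytesReader) readn1() byte {
        //              v := z.b[z.c] // may panic with index-out-of-range; recovered higher up as io.EOF
        //              z.c++
        //              return v
        //          }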
   117  //
   118  // ------------------------------------------
   119  // Escape Analysis
   120  //    - Prefer to return non-pointers if the value is used right away.
   121  //      Newly allocated values returned as pointers will be heap-allocated as they escape.
   122  //
   123  // Prefer functions and methods that
   124  //    - take no parameters and
   125  //    - return no results and
   126  //    - do not allocate.
   127  // These are optimized by the runtime.
   128  // For example, in json, we have dedicated functions for ReadMapElemKey, etc
   129  // which do not delegate to readDelim, as readDelim takes a parameter.
   130  // The difference in runtime was as much as 5%.
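        //
        // For example (a hedged sketch):
        //     func newT() T   { return T{} }  // result can be kept on the caller's stack
        //     func newTp() *T { return &T{} } // the returned value escapes to the heap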
   131  //
   132  // ------------------------------------------
   133  // Handling Nil
   134  //   - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top
   135  //   - Consequently, methods used with them as a parent in the chain e.g. kXXX
   136  //     methods do not handle nil.
   137  //   - Fastpath methods also do not handle nil.
   138  //     The switch called in (en|de)code(...) handles it so the dependent calls don't have to.
    139  //   - codecgen also handles nil before calling into the library for further work.
   140  //
   141  // ------------------------------------------
   142  // Passing reflect.Kind to functions that take a reflect.Value
    143  //   - Note that reflect.Value.Kind() is very cheap, as it's fundamentally a bitwise AND of 2 numbers
   144  //
   145  // ------------------------------------------
   146  // Transient values during decoding
   147  //
   148  // With reflection, the stack is not used. Consequently, values which may be stack-allocated in
   149  // normal use will cause a heap allocation when using reflection.
   150  //
   151  // There are cases where we know that a value is transient, and we just need to decode into it
   152  // temporarily so we can right away use its value for something else.
   153  //
   154  // In these situations, we can elide the heap allocation by being deliberate with use of a pre-cached
   155  // scratch memory or scratch value.
   156  //
    157  // We use this in the following situations:
   158  // - decode into a temp value x, and then set x into an interface
    159  // - decode into a temp value, for use as a map key, to look up a map value
   160  // - decode into a temp value, for use as a map value, to set into a map
   161  // - decode into a temp value, for sending into a channel
   162  //
   163  // By definition, Transient values are NEVER pointer-shaped values,
   164  // like pointer, func, map, chan. Using transient for pointer-shaped values
   165  // can lead to data corruption when GC tries to follow what it saw as a pointer at one point.
   166  //
   167  // In general, transient values are values which can be decoded as an atomic value
   168  // using a single call to the decDriver. This naturally includes bool or numeric types.
   169  //
   170  // Note that some values which "contain" pointers, specifically string and slice,
   171  // can also be transient. In the case of string, it is decoded as an atomic value.
   172  // In the case of a slice, decoding into its elements always uses an addressable
    173  // value in memory, i.e. we grow the slice, and then decode directly into the memory
   174  // address corresponding to that index in the slice.
   175  //
   176  // To handle these string and slice values, we have to use a scratch value
    177  // which has the same shape as a string or slice.
   178  //
   179  // Consequently, the full range of types which can be transient is:
   180  // - numbers
   181  // - bool
   182  // - string
   183  // - slice
   184  //
    185  // but for string and slice values, we MUST use a scratch space with that element
   186  // being defined as an unsafe.Pointer to start with.
   187  //
   188  // We have to be careful with maps. Because we iterate map keys and values during a range,
    189  // we must have 2 variants of the scratch space/value, for map keys and map values separately.
   190  //
   191  // These are the TransientAddrK and TransientAddr2K methods of decPerType.
   192  
   193  import (
   194  	"encoding"
   195  	"encoding/binary"
   196  	"errors"
   197  	"fmt"
   198  	"io"
   199  	"math"
   200  	"reflect"
   201  	"runtime"
   202  	"sort"
   203  	"strconv"
   204  	"strings"
   205  	"sync"
   206  	"sync/atomic"
   207  	"time"
   208  	"unicode/utf8"
   209  )
   210  
   211  // if debugging is true, then
    212  //   - within Encode/Decode, do not recover from panics
   213  //   - etc
   214  //
   215  // Note: Negative tests that check for errors will fail, so only use this
    216  // when debugging, preferably running only one test at a time.
   217  //
    218  // Note: RPC tests especially fail, as they depend on getting the error from an Encode/Decode call.
   219  const debugging = false
   220  
   221  const (
   222  	// containerLenUnknown is length returned from Read(Map|Array)Len
    223  	// when a format doesn't know the length a priori.
   224  	// For example, json doesn't pre-determine the length of a container (sequence/map).
   225  	containerLenUnknown = -1
   226  
   227  	// containerLenNil is length returned from Read(Map|Array)Len
   228  	// when a 'nil' was encountered in the stream.
   229  	containerLenNil = math.MinInt32
   230  
   231  	// [N]byte is handled by converting to []byte first,
   232  	// and sending to the dedicated fast-path function for []byte.
   233  	//
   234  	// Code exists in case our understanding is wrong.
    235  	// Keep the defensive code behind this flag, so we can remove/hide it if needed.
    236  	// For now, we enable the defensive code (i.e. set it to true).
   237  	handleBytesWithinKArray = true
   238  
   239  	// Support encoding.(Binary|Text)(Unm|M)arshaler.
   240  	// This constant flag will enable or disable it.
   241  	supportMarshalInterfaces = true
   242  
   243  	// bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte.
   244  	bytesFreeListNoCache = false
   245  
   246  	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
   247  	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
   248  	cacheLineSize = 64
   249  
   250  	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
   251  	wordSize     = wordSizeBits / 8
   252  
   253  	// MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch.
   254  	// Calling the fastpath switch in encode() or decode() could be redundant,
   255  	// as we still have to introspect it again within fnLoad
   256  	// to determine the function to use for values of that type.
   257  	skipFastpathTypeSwitchInDirectCall = false
   258  )
   259  
   260  const cpu32Bit = ^uint(0)>>32 == 0
   261  
   262  type rkind byte
   263  
   264  const (
   265  	rkindPtr    = rkind(reflect.Ptr)
   266  	rkindString = rkind(reflect.String)
   267  	rkindChan   = rkind(reflect.Chan)
   268  )
   269  
   270  type mapKeyFastKind uint8
   271  
   272  const (
   273  	mapKeyFastKind32 = iota + 1
   274  	mapKeyFastKind32ptr
   275  	mapKeyFastKind64
   276  	mapKeyFastKind64ptr
   277  	mapKeyFastKindStr
   278  )
   279  
   280  var (
   281  	// use a global mutex to ensure each Handle is initialized.
    282  	// We do this so we don't have to store the handle mutex
    283  	// directly in BasicHandle, allowing it to be shallow-copied.
   284  	handleInitMu sync.Mutex
   285  
   286  	must mustHdl
   287  	halt panicHdl
   288  
   289  	digitCharBitset      bitset256
   290  	numCharBitset        bitset256
   291  	whitespaceCharBitset bitset256
   292  	asciiAlphaNumBitset  bitset256
   293  
   294  	// numCharWithExpBitset64 bitset64
   295  	// numCharNoExpBitset64   bitset64
   296  	// whitespaceCharBitset64 bitset64
   297  	//
   298  	// // hasptrBitset sets bit for all kinds which always have internal pointers
   299  	// hasptrBitset bitset32
   300  
   301  	// refBitset sets bit for all kinds which are direct internal references
   302  	refBitset bitset32
   303  
   304  	// isnilBitset sets bit for all kinds which can be compared to nil
   305  	isnilBitset bitset32
   306  
   307  	// numBoolBitset sets bit for all number and bool kinds
   308  	numBoolBitset bitset32
   309  
   310  	// numBoolStrSliceBitset sets bits for all kinds which are numbers, bool, strings and slices
   311  	numBoolStrSliceBitset bitset32
   312  
   313  	// scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable
   314  	scalarBitset bitset32
   315  
   316  	mapKeyFastKindVals [32]mapKeyFastKind
   317  
   318  	// codecgen is set to true by codecgen, so that tests, etc can use this information as needed.
   319  	codecgen bool
   320  
   321  	oneByteArr    [1]byte
   322  	zeroByteSlice = oneByteArr[:0:0]
   323  
   324  	eofReader devNullReader
   325  )
   326  
   327  var (
   328  	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
   329  	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
   330  
   331  	errExtFnWriteExtUnsupported   = errors.New("BytesExt.WriteExt is not supported")
   332  	errExtFnReadExtUnsupported    = errors.New("BytesExt.ReadExt is not supported")
   333  	errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
   334  	errExtFnUpdateExtUnsupported  = errors.New("InterfaceExt.UpdateExt is not supported")
   335  
   336  	errPanicUndefined = errors.New("panic: undefined error")
   337  
   338  	errHandleInited = errors.New("cannot modify initialized Handle")
   339  
   340  	errNoFormatHandle = errors.New("no handle (cannot identify format)")
   341  )
   342  
   343  var pool4tiload = sync.Pool{
   344  	New: func() interface{} {
   345  		return &typeInfoLoad{
   346  			etypes:   make([]uintptr, 0, 4),
   347  			sfis:     make([]structFieldInfo, 0, 4),
   348  			sfiNames: make(map[string]uint16, 4),
   349  		}
   350  	},
   351  }
   352  
   353  func init() {
   354  	xx := func(f mapKeyFastKind, k ...reflect.Kind) {
   355  		for _, v := range k {
    356  			mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' is equal to 'v & 31'
   357  		}
   358  	}
   359  
   360  	var f mapKeyFastKind
   361  
   362  	f = mapKeyFastKind64
   363  	if wordSizeBits == 32 {
   364  		f = mapKeyFastKind32
   365  	}
   366  	xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
   367  
   368  	f = mapKeyFastKind64ptr
   369  	if wordSizeBits == 32 {
   370  		f = mapKeyFastKind32ptr
   371  	}
   372  	xx(f, reflect.Ptr)
   373  
   374  	xx(mapKeyFastKindStr, reflect.String)
   375  	xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
   376  	xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
   377  
   378  	numBoolBitset.
   379  		set(byte(reflect.Bool)).
   380  		set(byte(reflect.Int)).
   381  		set(byte(reflect.Int8)).
   382  		set(byte(reflect.Int16)).
   383  		set(byte(reflect.Int32)).
   384  		set(byte(reflect.Int64)).
   385  		set(byte(reflect.Uint)).
   386  		set(byte(reflect.Uint8)).
   387  		set(byte(reflect.Uint16)).
   388  		set(byte(reflect.Uint32)).
   389  		set(byte(reflect.Uint64)).
   390  		set(byte(reflect.Uintptr)).
   391  		set(byte(reflect.Float32)).
   392  		set(byte(reflect.Float64)).
   393  		set(byte(reflect.Complex64)).
   394  		set(byte(reflect.Complex128))
   395  
   396  	numBoolStrSliceBitset = numBoolBitset
   397  
   398  	numBoolStrSliceBitset.
   399  		set(byte(reflect.String)).
   400  		set(byte(reflect.Slice))
   401  
   402  	scalarBitset = numBoolBitset
   403  
   404  	scalarBitset.
   405  		set(byte(reflect.String))
   406  
   407  	// MARKER: reflect.Array is not a scalar, as its contents can be modified.
   408  
   409  	refBitset.
   410  		set(byte(reflect.Map)).
   411  		set(byte(reflect.Ptr)).
   412  		set(byte(reflect.Func)).
   413  		set(byte(reflect.Chan)).
   414  		set(byte(reflect.UnsafePointer))
   415  
   416  	isnilBitset = refBitset
   417  
   418  	isnilBitset.
   419  		set(byte(reflect.Interface)).
   420  		set(byte(reflect.Slice))
   421  
   422  	// hasptrBitset = isnilBitset
   423  	//
   424  	// hasptrBitset.
   425  	// 	set(byte(reflect.String))
   426  
   427  	for i := byte(0); i <= utf8.RuneSelf; i++ {
   428  		if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') {
   429  			asciiAlphaNumBitset.set(i)
   430  		}
   431  		switch i {
   432  		case ' ', '\t', '\r', '\n':
   433  			whitespaceCharBitset.set(i)
   434  		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
   435  			digitCharBitset.set(i)
   436  			numCharBitset.set(i)
   437  		case '.', '+', '-':
   438  			numCharBitset.set(i)
   439  		case 'e', 'E':
   440  			numCharBitset.set(i)
   441  		}
   442  	}
   443  }
   444  
   445  // driverStateManager supports the runtime state of an (enc|dec)Driver.
   446  //
   447  // During a side(En|De)code call, we can capture the state, reset it,
   448  // and then restore it later to continue the primary encoding/decoding.
   449  type driverStateManager interface {
   450  	resetState()
   451  	captureState() interface{}
   452  	restoreState(state interface{})
   453  }
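
        // A conceptual sketch of a side decode using driverStateManager
        // (not the exact call sites):
        //     st := d.d.captureState() // save the driver state of the primary stream
        //     d.d.resetState()         // start clean for the side decode
        //     // ... perform the side decode ...
        //     d.d.restoreState(st)     // resume the primary decode where it left off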
   454  
   455  type bdAndBdread struct {
   456  	bdRead bool
   457  	bd     byte
   458  }
   459  
   460  func (x bdAndBdread) captureState() interface{}   { return x }
   461  func (x *bdAndBdread) resetState()                { x.bd, x.bdRead = 0, false }
   462  func (x *bdAndBdread) reset()                     { x.resetState() }
   463  func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) }
   464  
   465  type clsErr struct {
   466  	err    error // error on closing
   467  	closed bool  // is it closed?
   468  }
   469  
   470  type charEncoding uint8
   471  
   472  const (
   473  	_ charEncoding = iota // make 0 unset
   474  	cUTF8
   475  	cUTF16LE
   476  	cUTF16BE
   477  	cUTF32LE
   478  	cUTF32BE
   479  	// Deprecated: not a true char encoding value
   480  	cRAW charEncoding = 255
   481  )
   482  
   483  // valueType is the stream type
   484  type valueType uint8
   485  
   486  const (
   487  	valueTypeUnset valueType = iota
   488  	valueTypeNil
   489  	valueTypeInt
   490  	valueTypeUint
   491  	valueTypeFloat
   492  	valueTypeBool
   493  	valueTypeString
   494  	valueTypeSymbol
   495  	valueTypeBytes
   496  	valueTypeMap
   497  	valueTypeArray
   498  	valueTypeTime
   499  	valueTypeExt
   500  
   501  	// valueTypeInvalid = 0xff
   502  )
   503  
   504  var valueTypeStrings = [...]string{
   505  	"Unset",
   506  	"Nil",
   507  	"Int",
   508  	"Uint",
   509  	"Float",
   510  	"Bool",
   511  	"String",
   512  	"Symbol",
   513  	"Bytes",
   514  	"Map",
   515  	"Array",
   516  	"Timestamp",
   517  	"Ext",
   518  }
   519  
   520  func (x valueType) String() string {
   521  	if int(x) < len(valueTypeStrings) {
   522  		return valueTypeStrings[x]
   523  	}
   524  	return strconv.FormatInt(int64(x), 10)
   525  }
   526  
    527  // note that containerMapStart and containerArrayStart are not sent.
    528  // This is because the ReadXXXStart and EncodeXXXStart already do these.
   529  type containerState uint8
   530  
   531  const (
   532  	_ containerState = iota
   533  
   534  	containerMapStart
   535  	containerMapKey
   536  	containerMapValue
   537  	containerMapEnd
   538  	containerArrayStart
   539  	containerArrayElem
   540  	containerArrayEnd
   541  )
   542  
   543  // do not recurse if a containing type refers to an embedded type
   544  // which refers back to its containing type (via a pointer).
   545  // The second time this back-reference happens, break out,
   546  // so as not to cause an infinite loop.
   547  const rgetMaxRecursion = 2
   548  
   549  // fauxUnion is used to keep track of the primitives decoded.
   550  //
   551  // Without it, we would have to decode each primitive and wrap it
   552  // in an interface{}, causing an allocation.
   553  // In this model, the primitives are decoded in a "pseudo-atomic" fashion,
   554  // so we can rest assured that no other decoding happens while these
   555  // primitives are being decoded.
   556  //
   557  // maps and arrays are not handled by this mechanism.
   558  type fauxUnion struct {
   559  	// r RawExt // used for RawExt, uint, []byte.
   560  
   561  	// primitives below
   562  	u uint64
   563  	i int64
   564  	f float64
   565  	l []byte
   566  	s string
   567  
   568  	// ---- cpu cache line boundary?
   569  	t time.Time
   570  	b bool
   571  
   572  	// state
   573  	v valueType
   574  }
   575  
   576  // typeInfoLoad is a transient object used while loading up a typeInfo.
   577  type typeInfoLoad struct {
   578  	etypes   []uintptr
   579  	sfis     []structFieldInfo
   580  	sfiNames map[string]uint16
   581  }
   582  
   583  func (x *typeInfoLoad) reset() {
   584  	x.etypes = x.etypes[:0]
   585  	x.sfis = x.sfis[:0]
   586  	for k := range x.sfiNames { // optimized to zero the map
   587  		delete(x.sfiNames, k)
   588  	}
   589  }
   590  
   591  // mirror json.Marshaler and json.Unmarshaler here,
   592  // so we don't import the encoding/json package
   593  
   594  type jsonMarshaler interface {
   595  	MarshalJSON() ([]byte, error)
   596  }
   597  type jsonUnmarshaler interface {
   598  	UnmarshalJSON([]byte) error
   599  }
   600  
   601  type isZeroer interface {
   602  	IsZero() bool
   603  }
   604  
   605  type isCodecEmptyer interface {
   606  	IsCodecEmpty() bool
   607  }
   608  
   609  type codecError struct {
   610  	err    error
   611  	name   string
   612  	pos    int
   613  	encode bool
   614  }
   615  
   616  func (e *codecError) Cause() error {
   617  	return e.err
   618  }
   619  
   620  func (e *codecError) Error() string {
   621  	if e.encode {
   622  		return fmt.Sprintf("%s encode error: %v", e.name, e.err)
   623  	}
   624  	return fmt.Sprintf("%s decode error [pos %d]: %v", e.name, e.pos, e.err)
   625  }
   626  
   627  func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out error) {
   628  	x, ok := in.(*codecError)
   629  	if ok && x.pos == numbytesread && x.name == name && x.encode == encode {
   630  		return in
   631  	}
   632  	return &codecError{in, name, numbytesread, encode}
   633  }
   634  
   635  var (
   636  	bigen bigenHelper
   637  
   638  	bigenstd = binary.BigEndian
   639  
   640  	structInfoFieldName = "_struct"
   641  
   642  	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
   643  	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
   644  	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
   645  	intfTyp        = intfSliceTyp.Elem()
   646  
   647  	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
   648  
   649  	stringTyp     = reflect.TypeOf("")
   650  	timeTyp       = reflect.TypeOf(time.Time{})
   651  	rawExtTyp     = reflect.TypeOf(RawExt{})
   652  	rawTyp        = reflect.TypeOf(Raw{})
   653  	uintptrTyp    = reflect.TypeOf(uintptr(0))
   654  	uint8Typ      = reflect.TypeOf(uint8(0))
   655  	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
   656  	uintTyp       = reflect.TypeOf(uint(0))
   657  	intTyp        = reflect.TypeOf(int(0))
   658  
   659  	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
   660  
   661  	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
   662  	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
   663  
   664  	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
   665  	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
   666  
   667  	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
   668  	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
   669  
   670  	selferTyp                = reflect.TypeOf((*Selfer)(nil)).Elem()
   671  	missingFielderTyp        = reflect.TypeOf((*MissingFielder)(nil)).Elem()
   672  	iszeroTyp                = reflect.TypeOf((*isZeroer)(nil)).Elem()
   673  	isCodecEmptyerTyp        = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem()
   674  	isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem()
   675  
   676  	uint8TypId      = rt2id(uint8Typ)
   677  	uint8SliceTypId = rt2id(uint8SliceTyp)
   678  	rawExtTypId     = rt2id(rawExtTyp)
   679  	rawTypId        = rt2id(rawTyp)
   680  	intfTypId       = rt2id(intfTyp)
   681  	timeTypId       = rt2id(timeTyp)
   682  	stringTypId     = rt2id(stringTyp)
   683  
   684  	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
   685  	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
   686  	intfSliceTypId   = rt2id(intfSliceTyp)
   687  	// mapBySliceTypId  = rt2id(mapBySliceTyp)
   688  
   689  	intBitsize  = uint8(intTyp.Bits())
   690  	uintBitsize = uint8(uintTyp.Bits())
   691  
   692  	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
   693  	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
   694  
   695  	chkOvf checkOverflow
   696  )
   697  
   698  var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
   699  
   700  // SelfExt is a sentinel extension signifying that types
   701  // registered with it SHOULD be encoded and decoded
   702  // based on the native mode of the format.
   703  //
   704  // This allows users to define a tag for an extension,
   705  // but signify that the types should be encoded/decoded as the native encoding.
   706  // This way, users need not also define how to encode or decode the extension.
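        //
        // Example registration (a hedged sketch; MyType and tag 78 are illustrative):
        //     var ch CborHandle
        //     ch.SetInterfaceExt(reflect.TypeOf(MyType{}), 78, SelfExt)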
   707  var SelfExt = &extFailWrapper{}
   708  
   709  // Selfer defines methods by which a value can encode or decode itself.
   710  //
   711  // Any type which implements Selfer will be able to encode or decode itself.
   712  // Consequently, during (en|de)code, this takes precedence over
   713  // (text|binary)(M|Unm)arshal or extension support.
   714  //
   715  // By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
   716  // If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
   717  // For example, the snippet below will cause such an error.
   718  //     type testSelferRecur struct{}
   719  //     func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
   720  //     func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
   721  //
   722  // Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
    723  // This is because, during each decode, we first check whether the next set of bytes
   724  // represent nil, and if so, we just set the value to nil.
   725  type Selfer interface {
   726  	CodecEncodeSelf(*Encoder)
   727  	CodecDecodeSelf(*Decoder)
   728  }
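
        // A minimal conforming implementation (a hedged sketch with a hypothetical type):
        //     type wrapInt struct{ V int }
        //     func (s *wrapInt) CodecEncodeSelf(e *Encoder) { e.MustEncode(s.V) } // encode the field, not s itself
        //     func (s *wrapInt) CodecDecodeSelf(d *Decoder) { d.MustDecode(&s.V) }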
   729  
   730  type isSelferViaCodecgener interface {
   731  	codecSelferViaCodecgen()
   732  }
   733  
   734  // MissingFielder defines the interface allowing structs to internally decode or encode
   735  // values which do not map to struct fields.
   736  //
   737  // We expect that this interface is bound to a pointer type (so the mutation function works).
   738  //
   739  // A use-case is if a version of a type unexports a field, but you want compatibility between
   740  // both versions during encoding and decoding.
   741  //
   742  // Note that the interface is completely ignored during codecgen.
   743  type MissingFielder interface {
   744  	// CodecMissingField is called to set a missing field and value pair.
   745  	//
   746  	// It returns true if the missing field was set on the struct.
   747  	CodecMissingField(field []byte, value interface{}) bool
   748  
   749  	// CodecMissingFields returns the set of fields which are not struct fields.
   750  	//
   751  	// Note that the returned map may be mutated by the caller.
   752  	CodecMissingFields() map[string]interface{}
   753  }
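
        // A hedged sketch of a conforming implementation (verTolerant is a hypothetical type):
        //     type verTolerant struct {
        //         Name  string
        //         extra map[string]interface{}
        //     }
        //     func (t *verTolerant) CodecMissingField(field []byte, value interface{}) bool {
        //         if t.extra == nil {
        //             t.extra = make(map[string]interface{})
        //         }
        //         t.extra[string(field)] = value // retain the unknown field
        //         return true
        //     }
        //     func (t *verTolerant) CodecMissingFields() map[string]interface{} { return t.extra }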
   754  
   755  // MapBySlice is a tag interface that denotes the slice or array value should encode as a map
   756  // in the stream, and can be decoded from a map in the stream.
   757  //
   758  // The slice or array must contain a sequence of key-value pairs.
   759  // The length of the slice or array must be even (fully divisible by 2).
   760  //
   761  // This affords storing a map in a specific sequence in the stream.
   762  //
   763  // Example usage:
   764  //    type T1 []string         // or []int or []Point or any other "slice" type
    765  //    func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
   766  //    type T2 struct { KeyValues T1 }
   767  //
   768  //    var kvs = []string{"one", "1", "two", "2", "three", "3"}
   769  //    var v2 = T2{ KeyValues: T1(kvs) }
   770  //    // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
   771  //
   772  // The support of MapBySlice affords the following:
   773  //   - A slice or array type which implements MapBySlice will be encoded as a map
   774  //   - A slice can be decoded from a map in the stream
   775  type MapBySlice interface {
   776  	MapBySlice()
   777  }
   778  
   779  // basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information.
   780  //
    781  // Storing this outside BasicHandle allows us to create shallow copies of a Handle,
   782  // which can be used e.g. when we need to modify config fields temporarily.
   783  // Shallow copies are used within tests, so we can modify some config fields for a test
    784  // temporarily when running tests in parallel, without the risk that a test executing
    785  // in parallel with other tests sees transient modified values not meant for it.
   786  type basicHandleRuntimeState struct {
   787  	// these are used during runtime.
   788  	// At init time, they should have nothing in them.
   789  	rtidFns      atomicRtidFnSlice
   790  	rtidFnsNoExt atomicRtidFnSlice
   791  
   792  	// Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls).
   793  	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
    794  	// These slices are used all the time, so keep as slices (not pointers).
   795  
   796  	extHandle
   797  
   798  	intf2impls
   799  
   800  	mu sync.Mutex
   801  
   802  	jsonHandle   bool
   803  	binaryHandle bool
   804  
   805  	// timeBuiltin is initialized from TimeNotBuiltin, and used internally.
   806  	// once initialized, it cannot be changed, as the function for encoding/decoding time.Time
   807  	// will have been cached and the TimeNotBuiltin value will not be consulted thereafter.
   808  	timeBuiltin bool
   809  	_           bool // padding
   810  }
   811  
   812  // BasicHandle encapsulates the common options and extension functions.
   813  //
   814  // Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
   815  type BasicHandle struct {
   816  	// BasicHandle is always a part of a different type.
    817  	// It doesn't have to fit into its own cache lines.
   818  
   819  	// TypeInfos is used to get the type info for any type.
   820  	//
   821  	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
   822  	TypeInfos *TypeInfos
   823  
   824  	*basicHandleRuntimeState
   825  
   826  	// ---- cache line
   827  
   828  	DecodeOptions
   829  
   830  	// ---- cache line
   831  
   832  	EncodeOptions
   833  
   834  	RPCOptions
   835  
   836  	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
   837  	//
   838  	// All Handlers should know how to encode/decode time.Time as part of the core
   839  	// format specification, or as a standard extension defined by the format.
   840  	//
   841  	// However, users can elect to handle time.Time as a custom extension, or via the
   842  	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
   843  	// To elect this behavior, users can set TimeNotBuiltin=true.
   844  	//
   845  	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
   846  	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
   847  	//
   848  	// Note: DO NOT CHANGE AFTER FIRST USE.
   849  	//
   850  	// Once a Handle has been initialized (used), do not modify this option. It will be ignored.
   851  	TimeNotBuiltin bool
   852  
   853  	// ExplicitRelease configures whether Release() is implicitly called after an encode or
   854  	// decode call.
   855  	//
   856  	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
   857  	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
   858  	// then you do not want it to be implicitly closed after each Encode/Decode call.
   859  	// Doing so will unnecessarily return resources to the shared pool, only for you to
   860  	// grab them right after again to do another Encode/Decode call.
   861  	//
   862  	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
   863  	// you are truly done.
   864  	//
   865  	// As an alternative, you can explicitly set a finalizer - so its resources
   866  	// are returned to the shared pool before it is garbage-collected. Do it as below:
   867  	//    runtime.SetFinalizer(e, (*Encoder).Release)
   868  	//    runtime.SetFinalizer(d, (*Decoder).Release)
   869  	//
    870  	// Deprecated: This is no longer used, as pools are only used for long-lived objects
   871  	// which are shared across goroutines.
   872  	// Setting this value has no effect. It is maintained for backward compatibility.
   873  	ExplicitRelease bool
   874  
   875  	// ---- cache line
   876  	inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)
   877  
   878  }
   879  
   880  // initHandle does a one-time initialization of the handle.
   881  // After this is run, do not modify the Handle, as some modifications are ignored
    882  // e.g. extensions, registered interfaces, TimeNotBuiltin, etc
   883  func initHandle(hh Handle) {
   884  	x := hh.getBasicHandle()
   885  
   886  	// MARKER: We need to simulate once.Do, to ensure no data race within the block.
   887  	// Consequently, below would not work.
   888  	//
   889  	// if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
   890  	// 	x.be = hh.isBinary()
   891  	// 	x.js = hh.isJson
   892  	// 	x.n = hh.Name()[0]
   893  	// }
   894  
    895  	// simulate once.Do using our own stored flag and mutex, as a CompareAndSwap
    896  	// is not sufficient, since a race condition can occur within the initHandle method.
    897  	// initHandle is marked noinline, so that this function can be inlined by its caller.
   898  	if atomic.LoadUint32(&x.inited) == 0 {
   899  		x.initHandle(hh)
   900  	}
   901  }
   902  
   903  func (x *BasicHandle) basicInit() {
   904  	x.rtidFns.store(nil)
   905  	x.rtidFnsNoExt.store(nil)
   906  	x.timeBuiltin = !x.TimeNotBuiltin
   907  }
   908  
   909  func (x *BasicHandle) init() {}
   910  
   911  func (x *BasicHandle) isInited() bool {
   912  	return atomic.LoadUint32(&x.inited) != 0
   913  }
   914  
   915  // clearInited: DANGEROUS - only use in testing, etc
   916  func (x *BasicHandle) clearInited() {
   917  	atomic.StoreUint32(&x.inited, 0)
   918  }
   919  
   920  // TimeBuiltin returns whether time.Time OOTB support is used,
   921  // based on the initial configuration of TimeNotBuiltin
   922  func (x *basicHandleRuntimeState) TimeBuiltin() bool {
   923  	return x.timeBuiltin
   924  }
   925  
   926  func (x *basicHandleRuntimeState) isJs() bool {
   927  	return x.jsonHandle
   928  }
   929  
   930  func (x *basicHandleRuntimeState) isBe() bool {
   931  	return x.binaryHandle
   932  }
   933  
   934  func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
   935  	rk := rt.Kind()
   936  	for rk == reflect.Ptr {
   937  		rt = rt.Elem()
   938  		rk = rt.Kind()
   939  	}
   940  
   941  	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
   942  		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
   943  	}
   944  
   945  	rtid := rt2id(rt)
   946  	switch rtid {
   947  	case timeTypId, rawTypId, rawExtTypId:
    948  		// these are all natively supported types, so they cannot have an extension.
   949  		// However, we do not return an error for these, as we do not document that.
   950  		// Instead, we silently treat as a no-op, and return.
   951  		return
   952  	}
   953  	for i := range x.extHandle {
   954  		v := &x.extHandle[i]
   955  		if v.rtid == rtid {
   956  			v.tag, v.ext = tag, ext
   957  			return
   958  		}
   959  	}
   960  	rtidptr := rt2id(reflect.PtrTo(rt))
   961  	x.extHandle = append(x.extHandle, extTypeTagFn{rtid, rtidptr, rt, tag, ext})
   962  	return
   963  }
   964  
   965  // initHandle should be called only from codec.initHandle global function.
   966  // make it uninlineable, as it is called at most once for each handle.
   967  //go:noinline
   968  func (x *BasicHandle) initHandle(hh Handle) {
   969  	handleInitMu.Lock()
   970  	defer handleInitMu.Unlock() // use defer, as halt may panic below
   971  	if x.inited == 0 {
   972  		if x.basicHandleRuntimeState == nil {
   973  			x.basicHandleRuntimeState = new(basicHandleRuntimeState)
   974  		}
   975  		x.jsonHandle = hh.isJson()
   976  		x.binaryHandle = hh.isBinary()
   977  		// ensure MapType and SliceType are of correct type
   978  		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
   979  			halt.onerror(errMapTypeNotMapKind)
   980  		}
   981  		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
   982  			halt.onerror(errSliceTypeNotSliceKind)
   983  		}
   984  		x.basicInit()
   985  		hh.init()
   986  		atomic.StoreUint32(&x.inited, 1)
   987  	}
   988  }
   989  
   990  func (x *BasicHandle) getBasicHandle() *BasicHandle {
   991  	return x
   992  }
   993  
   994  func (x *BasicHandle) typeInfos() *TypeInfos {
   995  	if x.TypeInfos != nil {
   996  		return x.TypeInfos
   997  	}
   998  	return defTypeInfos
   999  }
  1000  
  1001  func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
  1002  	return x.typeInfos().get(rtid, rt)
  1003  }
  1004  
  1005  func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
  1006  	// binary search. adapted from sort/search.go.
  1007  	// Note: we use goto (instead of for loop) so this can be inlined.
  1008  
  1009  	// h, i, j := 0, 0, len(s)
  1010  	var h uint // var h, i uint
  1011  	var j = uint(len(s))
  1012  LOOP:
  1013  	if i < j {
  1014  		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
  1015  		if s[h].rtid < rtid {
  1016  			i = h + 1
  1017  		} else {
  1018  			j = h
  1019  		}
  1020  		goto LOOP
  1021  	}
  1022  	if i < uint(len(s)) && s[i].rtid == rtid {
  1023  		fn = s[i].fn
  1024  	}
  1025  	return
  1026  }
  1027  
  1028  func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
  1029  	return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true)
  1030  }
  1031  
  1032  func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
  1033  	return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false)
  1034  }
  1035  
  1036  func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) {
  1037  	rtid := rt2id(rt)
  1038  	sp := fs.load()
  1039  	if sp != nil {
  1040  		if _, fn = findRtidFn(sp, rtid); fn != nil {
  1041  			return
  1042  		}
  1043  	}
  1044  
  1045  	fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt)
  1046  	x.mu.Lock()
  1047  	sp = fs.load()
  1048  	// since this is an atomic load/store, we MUST use a different array each time,
  1049  	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
  1050  	if sp == nil {
  1051  		sp = []codecRtidFn{{rtid, fn}}
  1052  		fs.store(sp)
  1053  	} else {
  1054  		idx, fn2 := findRtidFn(sp, rtid)
  1055  		if fn2 == nil {
  1056  			sp2 := make([]codecRtidFn, len(sp)+1)
  1057  			copy(sp2[idx+1:], sp[idx:])
  1058  			copy(sp2, sp[:idx])
  1059  			sp2[idx] = codecRtidFn{rtid, fn}
  1060  			fs.store(sp2)
  1061  		}
  1062  	}
  1063  	x.mu.Unlock()
  1064  	return
  1065  }
  1066  
  1067  func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) {
  1068  	var rtid uintptr
  1069  	var idx int
  1070  	rtid = rt2id(ti.fastpathUnderlying)
  1071  	idx = fastpathAvIndex(rtid)
  1072  	if idx == -1 {
  1073  		return
  1074  	}
  1075  	f = &fastpathAv[idx]
  1076  	if uint8(reflect.Array) == ti.kind {
  1077  		u = reflectArrayOf(ti.rt.Len(), ti.elem)
  1078  	} else {
  1079  		u = f.rt
  1080  	}
  1081  	return
  1082  }
  1083  
  1084  func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) {
  1085  	fn = new(codecFn)
  1086  	fi := &(fn.i)
  1087  	ti := tinfos.get(rtid, rt)
  1088  	fi.ti = ti
  1089  	rk := reflect.Kind(ti.kind)
  1090  
  1091  	// anything can be an extension except the built-in ones: time, raw and rawext.
  1092  	// ensure we check for these types first, then for an extension, before checking if
  1093  	// it implements one of the pre-declared interfaces.
  1094  
  1095  	fi.addrDf = true
  1096  	// fi.addrEf = true
  1097  
  1098  	if rtid == timeTypId && x.timeBuiltin {
  1099  		fn.fe = (*Encoder).kTime
  1100  		fn.fd = (*Decoder).kTime
  1101  	} else if rtid == rawTypId {
  1102  		fn.fe = (*Encoder).raw
  1103  		fn.fd = (*Decoder).raw
  1104  	} else if rtid == rawExtTypId {
  1105  		fn.fe = (*Encoder).rawExt
  1106  		fn.fd = (*Decoder).rawExt
  1107  		fi.addrD = true
  1108  		fi.addrE = true
  1109  	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
  1110  		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
  1111  		fn.fe = (*Encoder).ext
  1112  		fn.fd = (*Decoder).ext
  1113  		fi.addrD = true
  1114  		if rk == reflect.Struct || rk == reflect.Array {
  1115  			fi.addrE = true
  1116  		}
  1117  	} else if (ti.flagSelfer || ti.flagSelferPtr) &&
  1118  		!(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) {
  1119  		// do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true
  1120  		fn.fe = (*Encoder).selferMarshal
  1121  		fn.fd = (*Decoder).selferUnmarshal
  1122  		fi.addrD = ti.flagSelferPtr
  1123  		fi.addrE = ti.flagSelferPtr
  1124  	} else if supportMarshalInterfaces && x.isBe() &&
  1125  		(ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) &&
  1126  		(ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) {
  1127  		fn.fe = (*Encoder).binaryMarshal
  1128  		fn.fd = (*Decoder).binaryUnmarshal
  1129  		fi.addrD = ti.flagBinaryUnmarshalerPtr
  1130  		fi.addrE = ti.flagBinaryMarshalerPtr
  1131  	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
  1132  		(ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) &&
  1133  		(ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) {
  1134  		// If JSON, we should check jsonMarshaler before textMarshaler
  1135  		fn.fe = (*Encoder).jsonMarshal
  1136  		fn.fd = (*Decoder).jsonUnmarshal
  1137  		fi.addrD = ti.flagJsonUnmarshalerPtr
  1138  		fi.addrE = ti.flagJsonMarshalerPtr
  1139  	} else if supportMarshalInterfaces && !x.isBe() &&
  1140  		(ti.flagTextMarshaler || ti.flagTextMarshalerPtr) &&
  1141  		(ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) {
  1142  		fn.fe = (*Encoder).textMarshal
  1143  		fn.fd = (*Decoder).textUnmarshal
  1144  		fi.addrD = ti.flagTextUnmarshalerPtr
  1145  		fi.addrE = ti.flagTextMarshalerPtr
  1146  	} else {
  1147  		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) {
  1148  			// by default (without using unsafe),
  1149  			// if an array is not addressable, converting from an array to a slice
  1150  			// requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array).
  1151  			//
  1152  			// (Non-addressable arrays mostly occur as keys/values from a map).
  1153  			//
  1154  			// However, fastpath functions are mostly for slices of numbers or strings,
  1155  			// which are small by definition and thus allocation should be fast/cheap in time.
  1156  			//
  1157  			// Consequently, doing this quick allocation to elide the overhead of
  1158  			// non-optimized (not-unsafe) reflection is a fair price to pay.
  1159  			var rtid2 uintptr
  1160  			if !ti.flagHasPkgPath { // un-named type (slice or map or array)
  1161  				rtid2 = rtid
  1162  				if rk == reflect.Array {
  1163  					rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem)
  1164  				}
  1165  				if idx := fastpathAvIndex(rtid2); idx != -1 {
  1166  					fn.fe = fastpathAv[idx].encfn
  1167  					fn.fd = fastpathAv[idx].decfn
  1168  					fi.addrD = true
  1169  					fi.addrDf = false
  1170  					if rk == reflect.Array {
  1171  						fi.addrD = false // decode directly into array value (slice made from it)
  1172  					}
  1173  				}
  1174  			} else { // named type (with underlying type of map or slice or array)
  1175  				// try to use mapping for underlying type
  1176  				xfe, xrt := fnloadFastpathUnderlying(ti)
  1177  				if xfe != nil {
  1178  					xfnf := xfe.encfn
  1179  					xfnf2 := xfe.decfn
  1180  					if rk == reflect.Array {
  1181  						fi.addrD = false // decode directly into array value (slice made from it)
  1182  						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
  1183  							xfnf2(d, xf, rvConvert(xrv, xrt))
  1184  						}
  1185  					} else {
  1186  						fi.addrD = true
  1187  						fi.addrDf = false // meaning it can be an address(ptr) or a value
  1188  						xptr2rt := reflect.PtrTo(xrt)
  1189  						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
  1190  							if xrv.Kind() == reflect.Ptr {
  1191  								xfnf2(d, xf, rvConvert(xrv, xptr2rt))
  1192  							} else {
  1193  								xfnf2(d, xf, rvConvert(xrv, xrt))
  1194  							}
  1195  						}
  1196  					}
  1197  					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
  1198  						xfnf(e, xf, rvConvert(xrv, xrt))
  1199  					}
  1200  				}
  1201  			}
  1202  		}
  1203  		if fn.fe == nil && fn.fd == nil {
  1204  			switch rk {
  1205  			case reflect.Bool:
  1206  				fn.fe = (*Encoder).kBool
  1207  				fn.fd = (*Decoder).kBool
  1208  			case reflect.String:
  1209  				// Do not use different functions based on StringToRaw option, as that will statically
  1210  				// set the function for a string type, and if the Handle is modified thereafter,
  1211  				// behaviour is non-deterministic
  1212  				// i.e. DO NOT DO:
  1213  				//   if x.StringToRaw {
  1214  				//   	fn.fe = (*Encoder).kStringToRaw
  1215  				//   } else {
  1216  				//   	fn.fe = (*Encoder).kStringEnc
  1217  				//   }
  1218  
  1219  				fn.fe = (*Encoder).kString
  1220  				fn.fd = (*Decoder).kString
  1221  			case reflect.Int:
  1222  				fn.fd = (*Decoder).kInt
  1223  				fn.fe = (*Encoder).kInt
  1224  			case reflect.Int8:
  1225  				fn.fe = (*Encoder).kInt8
  1226  				fn.fd = (*Decoder).kInt8
  1227  			case reflect.Int16:
  1228  				fn.fe = (*Encoder).kInt16
  1229  				fn.fd = (*Decoder).kInt16
  1230  			case reflect.Int32:
  1231  				fn.fe = (*Encoder).kInt32
  1232  				fn.fd = (*Decoder).kInt32
  1233  			case reflect.Int64:
  1234  				fn.fe = (*Encoder).kInt64
  1235  				fn.fd = (*Decoder).kInt64
  1236  			case reflect.Uint:
  1237  				fn.fd = (*Decoder).kUint
  1238  				fn.fe = (*Encoder).kUint
  1239  			case reflect.Uint8:
  1240  				fn.fe = (*Encoder).kUint8
  1241  				fn.fd = (*Decoder).kUint8
  1242  			case reflect.Uint16:
  1243  				fn.fe = (*Encoder).kUint16
  1244  				fn.fd = (*Decoder).kUint16
  1245  			case reflect.Uint32:
  1246  				fn.fe = (*Encoder).kUint32
  1247  				fn.fd = (*Decoder).kUint32
  1248  			case reflect.Uint64:
  1249  				fn.fe = (*Encoder).kUint64
  1250  				fn.fd = (*Decoder).kUint64
  1251  			case reflect.Uintptr:
  1252  				fn.fe = (*Encoder).kUintptr
  1253  				fn.fd = (*Decoder).kUintptr
  1254  			case reflect.Float32:
  1255  				fn.fe = (*Encoder).kFloat32
  1256  				fn.fd = (*Decoder).kFloat32
  1257  			case reflect.Float64:
  1258  				fn.fe = (*Encoder).kFloat64
  1259  				fn.fd = (*Decoder).kFloat64
  1260  			case reflect.Complex64:
  1261  				fn.fe = (*Encoder).kComplex64
  1262  				fn.fd = (*Decoder).kComplex64
  1263  			case reflect.Complex128:
  1264  				fn.fe = (*Encoder).kComplex128
  1265  				fn.fd = (*Decoder).kComplex128
  1266  			case reflect.Chan:
  1267  				fn.fe = (*Encoder).kChan
  1268  				fn.fd = (*Decoder).kChan
  1269  			case reflect.Slice:
  1270  				fn.fe = (*Encoder).kSlice
  1271  				fn.fd = (*Decoder).kSlice
  1272  			case reflect.Array:
  1273  				fi.addrD = false // decode directly into array value (slice made from it)
  1274  				fn.fe = (*Encoder).kArray
  1275  				fn.fd = (*Decoder).kArray
  1276  			case reflect.Struct:
  1277  				if ti.anyOmitEmpty ||
  1278  					ti.flagMissingFielder ||
  1279  					ti.flagMissingFielderPtr {
  1280  					fn.fe = (*Encoder).kStruct
  1281  				} else {
  1282  					fn.fe = (*Encoder).kStructNoOmitempty
  1283  				}
  1284  				fn.fd = (*Decoder).kStruct
  1285  			case reflect.Map:
  1286  				fn.fe = (*Encoder).kMap
  1287  				fn.fd = (*Decoder).kMap
  1288  			case reflect.Interface:
  1289  				// encode: reflect.Interface is handled already by preEncodeValue
  1290  				fn.fd = (*Decoder).kInterface
  1291  				fn.fe = (*Encoder).kErr
  1292  			default:
  1293  				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
  1294  				fn.fe = (*Encoder).kErr
  1295  				fn.fd = (*Decoder).kErr
  1296  			}
  1297  		}
  1298  	}
  1299  	return
  1300  }
  1301  
  1302  // Handle defines a specific encoding format. It also stores any runtime state
  1303  // used during an Encoding or Decoding session e.g. stored state about Types, etc.
  1304  //
  1305  // Once a handle is configured, it can be shared across multiple Encoders and Decoders.
  1306  //
  1307  // Note that a Handle is NOT safe for concurrent modification.
  1308  //
  1309  // A Handle also should not be modified after it is configured and has
  1310  // been used at least once. This is because stored state may be out of sync with the
  1311  // new configuration, and a data race can occur when multiple goroutines access it.
  1312  // i.e. multiple Encoders or Decoders in different goroutines.
  1313  //
  1314  // Consequently, the typical usage model is that a Handle is pre-configured
  1315  // before first time use, and not modified while in use.
  1316  // Such a pre-configured Handle is safe for concurrent access.
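        //
        // Example (a hedged usage sketch):
        //     var h JsonHandle // configure all options here, before first use
        //     var b []byte
        //     err := NewEncoderBytes(&b, &h).Encode("hello") // handle err
        //     // h can now be shared by many Encoders and Decoders across goroutines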
  1317  type Handle interface {
  1318  	Name() string
  1319  	getBasicHandle() *BasicHandle
  1320  	newEncDriver() encDriver
  1321  	newDecDriver() decDriver
  1322  	isBinary() bool
  1323  	isJson() bool // json is special for now, so track it
  1324  	// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
  1325  	desc(bd byte) string
  1326  	// init initializes the handle based on handle-specific info (beyond what is in BasicHandle)
  1327  	init()
  1328  }
  1329  
  1330  // Raw represents raw formatted bytes.
  1331  // We "blindly" store it during encode and retrieve the raw bytes during decode.
  1332  // Note: it is dangerous during encode, so we may gate the behaviour
  1333  // behind an Encode flag which must be explicitly set.
  1334  type Raw []byte
  1335  
  1336  // RawExt represents raw unprocessed extension data.
  1337  // Some codecs will decode extension data as a *RawExt
  1338  // if there is no registered extension for the tag.
  1339  //
  1340  // Only one of Data or Value is nil.
  1341  // If Data is nil, then the content of the RawExt is in the Value.
  1342  type RawExt struct {
  1343  	Tag uint64
  1344  	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
  1345  	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
  1346  	Data []byte
  1347  	// Value represents the extension, if Data is nil.
  1348  	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
  1349  	// custom serialization of the types.
  1350  	Value interface{}
  1351  }
  1352  
  1353  func (re *RawExt) setData(xbs []byte, zerocopy bool) {
  1354  	if zerocopy {
  1355  		re.Data = xbs
  1356  	} else {
  1357  		re.Data = append(re.Data[:0], xbs...)
  1358  	}
  1359  }
  1360  
  1361  // BytesExt handles custom (de)serialization of types to/from []byte.
  1362  // It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
  1363  type BytesExt interface {
  1364  	// WriteExt converts a value to a []byte.
  1365  	//
  1366  	// Note: v is a pointer iff the registered extension type is a struct or array kind.
  1367  	WriteExt(v interface{}) []byte
  1368  
  1369  	// ReadExt updates a value from a []byte.
  1370  	//
  1371  	// Note: dst is always a pointer kind to the registered extension type.
  1372  	ReadExt(dst interface{}, src []byte)
  1373  }
  1374  
  1375  // InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
  1376  // The Encoder or Decoder will then handle the further (de)serialization of that known type.
  1377  //
  1378  // It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
  1379  type InterfaceExt interface {
  1380  	// ConvertExt converts a value into a simpler interface for easy encoding
  1381  	// e.g. convert time.Time to int64.
  1382  	//
  1383  	// Note: v is a pointer iff the registered extension type is a struct or array kind.
  1384  	ConvertExt(v interface{}) interface{}
  1385  
  1386  	// UpdateExt updates a value from a simpler interface for easy decoding
  1387  	// e.g. convert int64 to time.Time.
  1388  	//
  1389  	// Note: dst is always a pointer kind to the registered extension type.
  1390  	UpdateExt(dst interface{}, src interface{})
  1391  }
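        
        // Example (hypothetical sketch): an InterfaceExt for a user-defined type,
        // converting it to a simpler value the format already knows how to encode.
        // Celsius and celsiusExt are illustrative names, not part of this package,
        // and the sketch assumes the decoded interface value arrives as a float64.
        //
        //	type Celsius float64
        //	type celsiusExt struct{}
        //
        //	// v is not a pointer here, since Celsius is not a struct or array kind.
        //	func (celsiusExt) ConvertExt(v interface{}) interface{} {
        //		return float64(v.(Celsius))
        //	}
        //
        //	// dst is always a pointer to the registered type.
        //	func (celsiusExt) UpdateExt(dst interface{}, src interface{}) {
        //		*dst.(*Celsius) = Celsius(src.(float64))
        //	}
        //
        // Registration would then go through a handle, e.g. for a JsonHandle jh:
        //
        //	err := jh.SetInterfaceExt(reflect.TypeOf(Celsius(0)), 1, celsiusExt{})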
  1392  
  1393  // Ext handles custom (de)serialization of custom types / extensions.
  1394  type Ext interface {
  1395  	BytesExt
  1396  	InterfaceExt
  1397  }
  1398  
  1399  // addExtWrapper is a wrapper implementation to support the former AddExt exported method.
  1400  type addExtWrapper struct {
  1401  	encFn func(reflect.Value) ([]byte, error)
  1402  	decFn func(reflect.Value, []byte) error
  1403  }
  1404  
  1405  func (x addExtWrapper) WriteExt(v interface{}) []byte {
  1406  	bs, err := x.encFn(reflect.ValueOf(v))
  1407  	halt.onerror(err)
  1408  	return bs
  1409  }
  1410  
  1411  func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
  1412  	halt.onerror(x.decFn(reflect.ValueOf(v), bs))
  1413  }
  1414  
  1415  func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
  1416  	return x.WriteExt(v)
  1417  }
  1418  
  1419  func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
  1420  	x.ReadExt(dest, v.([]byte))
  1421  }
  1422  
  1423  type bytesExtFailer struct{}
  1424  
  1425  func (bytesExtFailer) WriteExt(v interface{}) []byte {
  1426  	halt.onerror(errExtFnWriteExtUnsupported)
  1427  	return nil
  1428  }
  1429  func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
  1430  	halt.onerror(errExtFnReadExtUnsupported)
  1431  }
  1432  
  1433  type interfaceExtFailer struct{}
  1434  
  1435  func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
  1436  	halt.onerror(errExtFnConvertExtUnsupported)
  1437  	return nil
  1438  }
  1439  func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
  1440  	halt.onerror(errExtFnUpdateExtUnsupported)
  1441  }
  1442  
  1443  type bytesExtWrapper struct {
  1444  	interfaceExtFailer
  1445  	BytesExt
  1446  }
  1447  
  1448  type interfaceExtWrapper struct {
  1449  	bytesExtFailer
  1450  	InterfaceExt
  1451  }
  1452  
  1453  type extFailWrapper struct {
  1454  	bytesExtFailer
  1455  	interfaceExtFailer
  1456  }
  1457  
  1458  type binaryEncodingType struct{}
  1459  
  1460  func (binaryEncodingType) isBinary() bool { return true }
  1461  func (binaryEncodingType) isJson() bool   { return false }
  1462  
  1463  type textEncodingType struct{}
  1464  
  1465  func (textEncodingType) isBinary() bool { return false }
  1466  func (textEncodingType) isJson() bool   { return false }
  1467  
  1468  type notJsonType struct{}
  1469  
  1470  func (notJsonType) isJson() bool { return false }
  1471  
  1472  // noBuiltInTypes is embedded into many types which do not support builtins
  1473  // e.g. msgpack, simple, cbor.
  1474  
  1475  type noBuiltInTypes struct{}
  1476  
  1477  func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
  1478  func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
  1479  
  1480  // bigenHelper handles ByteOrder operations directly using
  1481  // arrays of bytes (not slice of bytes).
  1482  //
  1483  // Since byteorder operations are very common for encoding and decoding
  1484  // numbers, lengths, etc - it is imperative that this operation is as
  1485  // fast as possible. Removing indirection (pointer chasing) to look
  1486  // at up to 8 bytes helps a lot here.
  1487  //
  1488  // For times where it is expedient to use a slice, delegate to the
  1489  // bigenstd (equal to the binary.BigEndian value).
  1490  //
  1491  // retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder)
  1492  type bigenHelper struct{}
  1493  
  1494  func (z bigenHelper) PutUint16(v uint16) (b [2]byte) {
  1495  	return [...]byte{
  1496  		byte(v >> 8),
  1497  		byte(v),
  1498  	}
  1499  }
  1500  
  1501  func (z bigenHelper) PutUint32(v uint32) (b [4]byte) {
  1502  	return [...]byte{
  1503  		byte(v >> 24),
  1504  		byte(v >> 16),
  1505  		byte(v >> 8),
  1506  		byte(v),
  1507  	}
  1508  }
  1509  
  1510  func (z bigenHelper) PutUint64(v uint64) (b [8]byte) {
  1511  	return [...]byte{
  1512  		byte(v >> 56),
  1513  		byte(v >> 48),
  1514  		byte(v >> 40),
  1515  		byte(v >> 32),
  1516  		byte(v >> 24),
  1517  		byte(v >> 16),
  1518  		byte(v >> 8),
  1519  		byte(v),
  1520  	}
  1521  }
  1522  
  1523  func (z bigenHelper) Uint16(b [2]byte) (v uint16) {
  1524  	return uint16(b[1]) |
  1525  		uint16(b[0])<<8
  1526  }
  1527  
  1528  func (z bigenHelper) Uint32(b [4]byte) (v uint32) {
  1529  	return uint32(b[3]) |
  1530  		uint32(b[2])<<8 |
  1531  		uint32(b[1])<<16 |
  1532  		uint32(b[0])<<24
  1533  }
  1534  
  1535  func (z bigenHelper) Uint64(b [8]byte) (v uint64) {
  1536  	return uint64(b[7]) |
  1537  		uint64(b[6])<<8 |
  1538  		uint64(b[5])<<16 |
  1539  		uint64(b[4])<<24 |
  1540  		uint64(b[3])<<32 |
  1541  		uint64(b[2])<<40 |
  1542  		uint64(b[1])<<48 |
  1543  		uint64(b[0])<<56
  1544  }
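        
        // Example (sketch): round-tripping a value through the array-based helpers.
        //
        //	var be bigenHelper
        //	b := be.PutUint32(0xAABBCCDD) // b == [4]byte{0xaa, 0xbb, 0xcc, 0xdd}
        //	v := be.Uint32(b)             // v == 0xAABBCCDD again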
  1545  
  1546  func (z bigenHelper) writeUint16(w *encWr, v uint16) {
  1547  	x := z.PutUint16(v)
  1548  	w.writen2(x[0], x[1])
  1549  }
  1550  
  1551  func (z bigenHelper) writeUint32(w *encWr, v uint32) {
  1552  	w.writen4(z.PutUint32(v))
  1553  }
  1554  
  1555  func (z bigenHelper) writeUint64(w *encWr, v uint64) {
  1556  	w.writen8(z.PutUint64(v))
  1557  }
  1558  
  1559  type extTypeTagFn struct {
  1560  	rtid    uintptr
  1561  	rtidptr uintptr
  1562  	rt      reflect.Type
  1563  	tag     uint64
  1564  	ext     Ext
  1565  }
  1566  
  1567  type extHandle []extTypeTagFn
  1568  
  1569  // AddExt registers an encode and decode function for a reflect.Type.
  1570  // To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
  1571  //
  1572  // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
  1573  func (x *BasicHandle) AddExt(rt reflect.Type, tag byte,
  1574  	encfn func(reflect.Value) ([]byte, error),
  1575  	decfn func(reflect.Value, []byte) error) (err error) {
  1576  	if encfn == nil || decfn == nil {
  1577  		return x.SetExt(rt, uint64(tag), nil)
  1578  	}
  1579  	return x.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
  1580  }
  1581  
  1582  // SetExt will set the extension for a tag and reflect.Type.
  1583  // Note that the type must be a named type, and specifically not a pointer or interface.
  1584  // An error is returned if that is not honored.
  1585  // To Deregister an ext, call SetExt with nil Ext.
  1586  //
  1587  // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
  1588  func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
  1589  	if x.isInited() {
  1590  		return errHandleInited
  1591  	}
  1592  	if x.basicHandleRuntimeState == nil {
  1593  		x.basicHandleRuntimeState = new(basicHandleRuntimeState)
  1594  	}
  1595  	return x.basicHandleRuntimeState.setExt(rt, tag, ext)
  1596  }
  1597  
  1598  func (o extHandle) getExtForI(x interface{}) (v *extTypeTagFn) {
  1599  	if len(o) > 0 {
  1600  		v = o.getExt(i2rtid(x), true)
  1601  	}
  1602  	return
  1603  }
  1604  
  1605  func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
  1606  	if !check {
  1607  		return
  1608  	}
  1609  	for i := range o {
  1610  		v = &o[i]
  1611  		if v.rtid == rtid || v.rtidptr == rtid {
  1612  			return
  1613  		}
  1614  	}
  1615  	return nil
  1616  }
  1617  
  1618  func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
  1619  	for i := range o {
  1620  		v = &o[i]
  1621  		if v.tag == tag {
  1622  			return
  1623  		}
  1624  	}
  1625  	return nil
  1626  }
  1627  
  1628  type intf2impl struct {
  1629  	rtid uintptr // for intf
  1630  	impl reflect.Type
  1631  }
  1632  
  1633  type intf2impls []intf2impl
  1634  
  1635  // Intf2Impl maps an interface to an implementing type.
  1636  // This allows us to support inferring the concrete type
  1637  // and populating it when passed an interface.
  1638  // e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
  1639  //
  1640  // Passing a nil impl will clear the mapping.
  1641  func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
  1642  	if impl != nil && !impl.Implements(intf) {
  1643  		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
  1644  	}
  1645  	rtid := rt2id(intf)
  1646  	o2 := *o
  1647  	for i := range o2 {
  1648  		v := &o2[i]
  1649  		if v.rtid == rtid {
  1650  			v.impl = impl
  1651  			return
  1652  		}
  1653  	}
  1654  	*o = append(o2, intf2impl{rtid, impl})
  1655  	return
  1656  }
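        
        // Example (sketch): decode into a concrete *bytes.Buffer whenever the
        // destination is a bare io.Reader. m here stands in for the handle's
        // internal intf2impls list.
        //
        //	var m intf2impls
        //	err := m.Intf2Impl(
        //		reflect.TypeOf((*io.Reader)(nil)).Elem(), // the interface type
        //		reflect.TypeOf(&bytes.Buffer{}),          // an implementing type
        //	)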
  1657  
  1658  func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
  1659  	for i := range o {
  1660  		v := &o[i]
  1661  		if v.rtid == rtid {
  1662  			if v.impl == nil {
  1663  				return
  1664  			}
  1665  			vkind := v.impl.Kind()
  1666  			if vkind == reflect.Ptr {
  1667  				return reflect.New(v.impl.Elem())
  1668  			}
  1669  			return rvZeroAddrK(v.impl, vkind)
  1670  		}
  1671  	}
  1672  	return
  1673  }
  1674  
  1675  // structFieldInfoPathNode is a node in a tree, which allows us to easily
  1676  // walk the anonymous (embedded field) path.
  1677  //
  1678  // In the typical case, the node is not embedded/anonymous, and thus the parent
  1679  // will be nil and this information becomes a value (not needing any indirection).
  1680  type structFieldInfoPathNode struct {
  1681  	parent *structFieldInfoPathNode
  1682  
  1683  	offset   uint16
  1684  	index    uint16
  1685  	kind     uint8
  1686  	numderef uint8
  1687  
  1688  	// encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo,
  1689  	// but are kept here for tighter packaging.
  1690  
  1691  	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
  1692  	omitEmpty            bool
  1693  
  1694  	typ reflect.Type
  1695  }
  1696  
  1697  // depth returns the number of valid nodes in the hierarchy
  1698  func (path *structFieldInfoPathNode) depth() (d int) {
  1699  TOP:
  1700  	if path != nil {
  1701  		d++
  1702  		path = path.parent
  1703  		goto TOP
  1704  	}
  1705  	return
  1706  }
  1707  
  1708  // field returns the field of the struct.
  1709  func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) {
  1710  	if parent := path.parent; parent != nil {
  1711  		v = parent.field(v)
  1712  		for j, k := uint8(0), parent.numderef; j < k; j++ {
  1713  			if rvIsNil(v) {
  1714  				return
  1715  			}
  1716  			v = v.Elem()
  1717  		}
  1718  	}
  1719  	return path.rvField(v)
  1720  }
  1721  
  1722  // fieldAlloc returns the field of the struct.
  1723  // It allocates if a nil value was seen while searching.
  1724  func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) {
  1725  	if parent := path.parent; parent != nil {
  1726  		v = parent.fieldAlloc(v)
  1727  		for j, k := uint8(0), parent.numderef; j < k; j++ {
  1728  			if rvIsNil(v) {
  1729  				rvSetDirect(v, reflect.New(rvType(v).Elem()))
  1730  			}
  1731  			v = v.Elem()
  1732  		}
  1733  	}
  1734  	return path.rvField(v)
  1735  }
  1736  
  1737  type structFieldInfo struct {
  1738  	encName string // encode name
  1739  
  1740  	// encNameHash uintptr
  1741  
  1742  	// fieldName string // currently unused
  1743  
  1744  	// encNameAsciiAlphaNum and omitEmpty should be here,
  1745  	// but are stored in structFieldInfoPathNode for tighter packaging.
  1746  
  1747  	path structFieldInfoPathNode
  1748  }
  1749  
  1750  func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
  1751  	keytype = valueTypeString // default
  1752  	if stag == "" {
  1753  		return
  1754  	}
  1755  	ss := strings.Split(stag, ",")
  1756  	if len(ss) < 2 {
  1757  		return
  1758  	}
  1759  	for _, s := range ss[1:] {
  1760  		switch s {
  1761  		case "omitempty":
  1762  			omitEmpty = true
  1763  		case "toarray":
  1764  			toArray = true
  1765  		case "int":
  1766  			keytype = valueTypeInt
  1767  		case "uint":
  1768  			keytype = valueTypeUint
  1769  		case "float":
  1770  			keytype = valueTypeFloat
  1771  			// case "bool":
  1772  			// 	keytype = valueTypeBool
  1773  		case "string":
  1774  			keytype = valueTypeString
  1775  		}
  1776  	}
  1777  	return
  1778  }
  1779  
  1780  func (si *structFieldInfo) parseTag(stag string) {
  1781  	if stag == "" {
  1782  		return
  1783  	}
  1784  	for i, s := range strings.Split(stag, ",") {
  1785  		if i == 0 {
  1786  			if s != "" {
  1787  				si.encName = s
  1788  			}
  1789  		} else {
  1790  			switch s {
  1791  			case "omitempty":
  1792  				si.path.omitEmpty = true
  1793  			}
  1794  		}
  1795  	}
  1796  }
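        
        // Example (sketch): how struct tags map to the parsing above, assuming the
        // default "codec" tag key and "_struct" as the structInfoFieldName.
        //
        //	type T struct {
        //		_struct bool `codec:",toarray"`    // parseStructInfo: toArray=true
        //		A       int  `codec:"a,omitempty"` // parseTag: encName="a", omitEmpty=true
        //		B       int  `codec:"-"`           // skipped entirely while walking fields
        //	}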
  1797  
  1798  type sfiSortedByEncName []*structFieldInfo
  1799  
  1800  func (p sfiSortedByEncName) Len() int           { return len(p) }
  1801  func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
  1802  func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }
  1803  
  1804  // typeInfo4Container holds information that is only available for
  1805  // containers like map, array, chan, slice.
  1806  type typeInfo4Container struct {
  1807  	elem reflect.Type
  1808  	// key is:
  1809  	//   - if map kind: map key
  1810  	//   - if array kind: sliceOf(elem)
  1811  	//   - if chan kind: sliceof(elem)
  1812  	key reflect.Type
  1813  
  1814  	// fastpathUnderlying is the underlying type (as defined by the go spec) of a named
  1815  	// slice/map/array, used by fastpath where we have defined fastpath functions for that underlying type.
  1816  	//
  1817  	// for a map, it's a map; for a slice or array, it's a slice; else it's nil.
  1818  	fastpathUnderlying reflect.Type
  1819  
  1820  	tikey  *typeInfo
  1821  	tielem *typeInfo
  1822  }
  1823  
  1824  // typeInfo keeps static (non-changing, readonly) information
  1825  // about each (non-ptr) type referenced in the encode/decode sequence.
  1826  //
  1827  // During an encode/decode sequence, we work as below:
  1828  //   - If base is a built-in type, en/decode base value
  1829  //   - If base is registered as an extension, en/decode base value
  1830  //   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
  1831  //   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
  1832  //   - Else decode appropriately based on the reflect.Kind
  1833  type typeInfo struct {
  1834  	rt  reflect.Type
  1835  	ptr reflect.Type
  1836  
  1837  	// pkgpath string
  1838  
  1839  	rtid uintptr
  1840  
  1841  	numMeth uint16 // number of methods
  1842  	kind    uint8
  1843  	chandir uint8
  1844  
  1845  	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
  1846  	toArray      bool      // whether this (struct) type should be encoded as an array
  1847  	keyType      valueType // if struct, how is the field name stored in a stream? default is string
  1848  	mbs          bool      // base type (T or *T) is a MapBySlice
  1849  
  1850  	sfi4Name map[string]*structFieldInfo // map. used for finding sfi given a name
  1851  
  1852  	*typeInfo4Container
  1853  
  1854  	// ---- cpu cache line boundary?
  1855  
  1856  	size, keysize, elemsize uint32
  1857  
  1858  	keykind, elemkind uint8
  1859  
  1860  	flagHasPkgPath   bool // Type.PackagePath != ""
  1861  	flagCustom       bool // does this have custom implementation?
  1862  	flagComparable   bool
  1863  	flagCanTransient bool
  1864  
  1865  	flagSelferViaCodecgen bool
  1866  
  1867  	// custom implementation flags
  1868  	flagIsZeroer    bool
  1869  	flagIsZeroerPtr bool
  1870  
  1871  	flagIsCodecEmptyer    bool
  1872  	flagIsCodecEmptyerPtr bool
  1873  
  1874  	flagBinaryMarshaler    bool
  1875  	flagBinaryMarshalerPtr bool
  1876  
  1877  	flagBinaryUnmarshaler    bool
  1878  	flagBinaryUnmarshalerPtr bool
  1879  
  1880  	flagTextMarshaler    bool
  1881  	flagTextMarshalerPtr bool
  1882  
  1883  	flagTextUnmarshaler    bool
  1884  	flagTextUnmarshalerPtr bool
  1885  
  1886  	flagJsonMarshaler    bool
  1887  	flagJsonMarshalerPtr bool
  1888  
  1889  	flagJsonUnmarshaler    bool
  1890  	flagJsonUnmarshalerPtr bool
  1891  
  1892  	flagSelfer    bool
  1893  	flagSelferPtr bool
  1894  
  1895  	flagMissingFielder    bool
  1896  	flagMissingFielderPtr bool
  1897  
  1898  	infoFieldOmitempty bool
  1899  
  1900  	sfi structFieldInfos
  1901  }
  1902  
  1903  func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) {
  1904  	return ti.sfi4Name[string(name)]
  1905  }
  1906  
  1907  func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) {
  1908  	n = len(x)
  1909  
  1910  	for i := range x {
  1911  		ui := uint16(i)
  1912  		xn := x[i].encName
  1913  		j, ok := ss[xn]
  1914  		if ok {
  1915  			i2clear := ui                              // index to be cleared
  1916  			if x[i].path.depth() < x[j].path.depth() { // this one is shallower
  1917  				ss[xn] = ui
  1918  				i2clear = j
  1919  			}
  1920  			if x[i2clear].encName != "" {
  1921  				x[i2clear].encName = ""
  1922  				n--
  1923  			}
  1924  		} else {
  1925  			ss[xn] = ui
  1926  		}
  1927  	}
  1928  
  1929  	return
  1930  }
  1931  
  1932  func (ti *typeInfo) init(x []structFieldInfo, n int) {
  1933  	var anyOmitEmpty bool
  1934  
  1935  	// remove all the nils (non-ready)
  1936  	m := make(map[string]*structFieldInfo, n)
  1937  	w := make([]structFieldInfo, n)
  1938  	y := make([]*structFieldInfo, n+n)
  1939  	z := y[n:]
  1940  	y = y[:n]
  1941  	n = 0
  1942  	for i := range x {
  1943  		if x[i].encName == "" {
  1944  			continue
  1945  		}
  1946  		if !anyOmitEmpty && x[i].path.omitEmpty {
  1947  			anyOmitEmpty = true
  1948  		}
  1949  		w[n] = x[i]
  1950  		y[n] = &w[n]
  1951  		m[x[i].encName] = &w[n]
  1952  		n++
  1953  	}
  1954  	if n != len(y) {
  1955  		halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n)
  1956  	}
  1957  
  1958  	copy(z, y)
  1959  	sort.Sort(sfiSortedByEncName(z))
  1960  
  1961  	ti.anyOmitEmpty = anyOmitEmpty
  1962  	ti.sfi.load(y, z)
  1963  	ti.sfi4Name = m
  1964  }
  1965  
  1966  // Handling flagCanTransient
  1967  //
  1968  // We support transient optimization if the kind of the type is
  1969  // a number, bool, string, or slice.
  1970  // In addition, we also support it if the kind is struct or array,
  1971  // and the type does not contain any pointers (recursively).
  1972  //
  1973  // Note that all reference types (string, slice, func, map, ptr, interface, etc) contain pointers.
  1974  //
  1975  // If using transient for a type with a pointer, there is the potential for data corruption
  1976  // when GC tries to follow a "transient" pointer which may become a non-pointer soon after.
  1977  //
  1978  
  1979  func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) {
  1980  	var bs *bitset32
  1981  	if transientValueHasStringSlice {
  1982  		bs = &numBoolStrSliceBitset
  1983  	} else {
  1984  		bs = &numBoolBitset
  1985  	}
  1986  	if bs.isset(byte(k)) {
  1987  		v = true
  1988  	} else if k == reflect.Array {
  1989  		elem := t.Elem()
  1990  		v = isCanTransient(elem, elem.Kind())
  1991  	} else if k == reflect.Struct {
  1992  		v = true
  1993  		for j, jlen := 0, t.NumField(); j < jlen; j++ {
  1994  			f := t.Field(j)
  1995  			if !isCanTransient(f.Type, f.Type.Kind()) {
  1996  				v = false
  1997  				return
  1998  			}
  1999  		}
  2000  	} else {
  2001  		v = false
  2002  	}
  2003  	return
  2004  }
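        
        // Example (sketch): what the check above accepts and rejects.
        //
        //	isCanTransient(reflect.TypeOf(0), reflect.Int)           // true: number kind
        //	isCanTransient(reflect.TypeOf([4]byte{}), reflect.Array) // true: no pointers anywhere
        //	isCanTransient(reflect.TypeOf([2]*int{}), reflect.Array) // false: elements are pointers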
  2005  
  2006  func (ti *typeInfo) doSetFlagCanTransient() {
  2007  	if transientSizeMax > 0 {
  2008  		ti.flagCanTransient = ti.size <= transientSizeMax
  2009  	} else {
  2010  		ti.flagCanTransient = true
  2011  	}
  2012  	if ti.flagCanTransient {
  2013  		// if ti kind is a num, bool, string or slice, then it can be transient
  2014  		if !numBoolStrSliceBitset.isset(ti.kind) {
  2015  			ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind))
  2016  		}
  2017  	}
  2018  }
  2019  
  2020  type rtid2ti struct {
  2021  	rtid uintptr
  2022  	ti   *typeInfo
  2023  }
  2024  
  2025  // TypeInfos caches typeInfo for each type on first inspection.
  2026  //
  2027  // It is configured with a set of tag keys, which are used to get
  2028  // configuration for the type.
  2029  type TypeInfos struct {
  2030  	infos atomicTypeInfoSlice
  2031  	mu    sync.Mutex
  2032  	_     uint64 // padding (cache-aligned)
  2033  	tags  []string
  2034  	_     uint64 // padding (cache-aligned)
  2035  }
  2036  
  2037  // NewTypeInfos creates a TypeInfos given a set of struct tags keys.
  2038  //
  2039  // This allows users to customize the struct tag keys which contain configuration
  2040  // of their types.
  2041  func NewTypeInfos(tags []string) *TypeInfos {
  2042  	return &TypeInfos{tags: tags}
  2043  }
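        
        // Example (sketch): a TypeInfos that reads `codec` tags first and falls back
        // to `json` tags, so existing json-tagged structs work unchanged.
        //
        //	tis := NewTypeInfos([]string{"codec", "json"})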
  2044  
  2045  func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
  2046  	// check for tags: codec, json, in that order.
  2047  	// this allows seamless support for many configured structs.
  2048  	for _, x := range x.tags {
  2049  		s = t.Get(x)
  2050  		if s != "" {
  2051  			return s
  2052  		}
  2053  	}
  2054  	return
  2055  }
  2056  
  2057  func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
  2058  	// binary search. adapted from sort/search.go.
  2059  	// Note: we use goto (instead of for loop) so this can be inlined.
  2060  
  2061  	var h uint
  2062  	var j = uint(len(s))
  2063  LOOP:
  2064  	if i < j {
  2065  		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
  2066  		if s[h].rtid < rtid {
  2067  			i = h + 1
  2068  		} else {
  2069  			j = h
  2070  		}
  2071  		goto LOOP
  2072  	}
  2073  	if i < uint(len(s)) && s[i].rtid == rtid {
  2074  		ti = s[i].ti
  2075  	}
  2076  	return
  2077  }
  2078  
  2079  func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
  2080  	if pti = x.find(rtid); pti == nil {
  2081  		pti = x.load(rt)
  2082  	}
  2083  	return
  2084  }
  2085  
  2086  func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) {
  2087  	sp := x.infos.load()
  2088  	if sp != nil {
  2089  		_, pti = findTypeInfo(sp, rtid)
  2090  	}
  2091  	return
  2092  }
  2093  
  2094  func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) {
  2095  	rk := rt.Kind()
  2096  
  2097  	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
  2098  		halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
  2099  	}
  2100  
  2101  	rtid := rt2id(rt)
  2102  
  2103  	// do not hold lock while computing this.
  2104  	// it may lead to duplication, but that's ok.
  2105  	ti := typeInfo{
  2106  		rt:      rt,
  2107  		ptr:     reflect.PtrTo(rt),
  2108  		rtid:    rtid,
  2109  		kind:    uint8(rk),
  2110  		size:    uint32(rt.Size()),
  2111  		numMeth: uint16(rt.NumMethod()),
  2112  		keyType: valueTypeString, // default it - so it's never 0
  2113  
  2114  		// pkgpath: rt.PkgPath(),
  2115  		flagHasPkgPath: rt.PkgPath() != "",
  2116  	}
  2117  
  2118  	// bset sets custom implementation flags
  2119  	bset := func(when bool, b *bool) {
  2120  		if when {
  2121  			*b = true
  2122  			ti.flagCustom = true
  2123  		}
  2124  	}
  2125  
  2126  	var b1, b2 bool
  2127  
  2128  	b1, b2 = implIntf(rt, binaryMarshalerTyp)
  2129  	bset(b1, &ti.flagBinaryMarshaler)
  2130  	bset(b2, &ti.flagBinaryMarshalerPtr)
  2131  	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
  2132  	bset(b1, &ti.flagBinaryUnmarshaler)
  2133  	bset(b2, &ti.flagBinaryUnmarshalerPtr)
  2134  	b1, b2 = implIntf(rt, textMarshalerTyp)
  2135  	bset(b1, &ti.flagTextMarshaler)
  2136  	bset(b2, &ti.flagTextMarshalerPtr)
  2137  	b1, b2 = implIntf(rt, textUnmarshalerTyp)
  2138  	bset(b1, &ti.flagTextUnmarshaler)
  2139  	bset(b2, &ti.flagTextUnmarshalerPtr)
  2140  	b1, b2 = implIntf(rt, jsonMarshalerTyp)
  2141  	bset(b1, &ti.flagJsonMarshaler)
  2142  	bset(b2, &ti.flagJsonMarshalerPtr)
  2143  	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
  2144  	bset(b1, &ti.flagJsonUnmarshaler)
  2145  	bset(b2, &ti.flagJsonUnmarshalerPtr)
  2146  	b1, b2 = implIntf(rt, selferTyp)
  2147  	bset(b1, &ti.flagSelfer)
  2148  	bset(b2, &ti.flagSelferPtr)
  2149  	b1, b2 = implIntf(rt, missingFielderTyp)
  2150  	bset(b1, &ti.flagMissingFielder)
  2151  	bset(b2, &ti.flagMissingFielderPtr)
  2152  	b1, b2 = implIntf(rt, iszeroTyp)
  2153  	bset(b1, &ti.flagIsZeroer)
  2154  	bset(b2, &ti.flagIsZeroerPtr)
  2155  	b1, b2 = implIntf(rt, isCodecEmptyerTyp)
  2156  	bset(b1, &ti.flagIsCodecEmptyer)
  2157  	bset(b2, &ti.flagIsCodecEmptyerPtr)
  2158  
  2159  	b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp)
  2160  	ti.flagSelferViaCodecgen = b1 || b2
  2161  
  2162  	b1 = rt.Comparable()
  2163  	// bset(b1, &ti.flagComparable)
  2164  	ti.flagComparable = b1
  2165  
  2166  	ti.doSetFlagCanTransient()
  2167  
  2168  	var tt reflect.Type
  2169  	switch rk {
  2170  	case reflect.Struct:
  2171  		var omitEmpty bool
  2172  		if f, ok := rt.FieldByName(structInfoFieldName); ok {
  2173  			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
  2174  			ti.infoFieldOmitempty = omitEmpty
  2175  		} else {
  2176  			ti.keyType = valueTypeString
  2177  		}
  2178  		pp, pi := &pool4tiload, pool4tiload.Get()
  2179  		pv := pi.(*typeInfoLoad)
  2180  		pv.reset()
  2181  		pv.etypes = append(pv.etypes, ti.rtid)
  2182  		x.rget(rt, rtid, nil, pv, omitEmpty)
  2183  		n := ti.resolve(pv.sfis, pv.sfiNames)
  2184  		ti.init(pv.sfis, n)
  2185  		pp.Put(pi)
  2186  	case reflect.Map:
  2187  		ti.typeInfo4Container = new(typeInfo4Container)
  2188  		ti.elem = rt.Elem()
  2189  		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
  2190  		}
  2191  		ti.tielem = x.get(rt2id(tt), tt)
  2192  		ti.elemkind = uint8(ti.elem.Kind())
  2193  		ti.elemsize = uint32(ti.elem.Size())
  2194  		ti.key = rt.Key()
  2195  		for tt = ti.key; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
  2196  		}
  2197  		ti.tikey = x.get(rt2id(tt), tt)
  2198  		ti.keykind = uint8(ti.key.Kind())
  2199  		ti.keysize = uint32(ti.key.Size())
  2200  		if ti.flagHasPkgPath {
  2201  			ti.fastpathUnderlying = reflect.MapOf(ti.key, ti.elem)
  2202  		}
  2203  	case reflect.Slice:
  2204  		ti.typeInfo4Container = new(typeInfo4Container)
  2205  		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
  2206  		if !ti.mbs && b2 {
  2207  			ti.mbs = b2
  2208  		}
  2209  		ti.elem = rt.Elem()
  2210  		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
  2211  		}
  2212  		ti.tielem = x.get(rt2id(tt), tt)
  2213  		ti.elemkind = uint8(ti.elem.Kind())
  2214  		ti.elemsize = uint32(ti.elem.Size())
  2215  		if ti.flagHasPkgPath {
  2216  			ti.fastpathUnderlying = reflect.SliceOf(ti.elem)
  2217  		}
  2218  	case reflect.Chan:
  2219  		ti.typeInfo4Container = new(typeInfo4Container)
  2220  		ti.elem = rt.Elem()
  2221  		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
  2222  		}
  2223  		ti.tielem = x.get(rt2id(tt), tt)
  2224  		ti.elemkind = uint8(ti.elem.Kind())
  2225  		ti.elemsize = uint32(ti.elem.Size())
  2226  		ti.chandir = uint8(rt.ChanDir())
  2227  		ti.key = reflect.SliceOf(ti.elem)
  2228  		ti.keykind = uint8(reflect.Slice)
  2229  	case reflect.Array:
  2230  		ti.typeInfo4Container = new(typeInfo4Container)
  2231  		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
  2232  		if !ti.mbs && b2 {
  2233  			ti.mbs = b2
  2234  		}
  2235  		ti.elem = rt.Elem()
  2236  		ti.elemkind = uint8(ti.elem.Kind())
  2237  		ti.elemsize = uint32(ti.elem.Size())
  2238  		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
  2239  		}
  2240  		ti.tielem = x.get(rt2id(tt), tt)
  2241  		ti.key = reflect.SliceOf(ti.elem)
  2242  		ti.keykind = uint8(reflect.Slice)
  2243  		ti.keysize = uint32(ti.key.Size())
  2244  		if ti.flagHasPkgPath {
  2245  			ti.fastpathUnderlying = ti.key
  2246  		}
  2247  
  2248  		// MARKER: reflect.Ptr cannot happen here, as we halt early if reflect.Ptr passed in
  2249  		// case reflect.Ptr:
  2250  		// 	ti.elem = rt.Elem()
  2251  		// 	ti.elemkind = uint8(ti.elem.Kind())
  2252  		// 	ti.elemsize = uint32(ti.elem.Size())
  2253  	}
  2254  
  2255  	x.mu.Lock()
  2256  	sp := x.infos.load()
  2257  	// since this is an atomic load/store, we MUST use a different array each time,
  2258  	// else we have a data race when a store is happening simultaneously with a findTypeInfo call.
  2259  	if sp == nil {
  2260  		pti = &ti
  2261  		sp = []rtid2ti{{rtid, pti}}
  2262  		x.infos.store(sp)
  2263  	} else {
  2264  		var idx uint
  2265  		idx, pti = findTypeInfo(sp, rtid)
  2266  		if pti == nil {
  2267  			pti = &ti
  2268  			sp2 := make([]rtid2ti, len(sp)+1)
  2269  			copy(sp2[idx+1:], sp[idx:])
  2270  			copy(sp2, sp[:idx])
  2271  			sp2[idx] = rtid2ti{rtid, pti}
  2272  			x.infos.store(sp2)
  2273  		}
  2274  	}
  2275  	x.mu.Unlock()
  2276  	return
  2277  }
  2278  
  2279  func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
  2280  	path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) {
  2281  	// Read up fields and store how to access the value.
  2282  	//
  2283  	// It uses go's rules for selectors on embedded fields,
  2284  	// which say that the field with the shallowest depth is selected.
  2285  	//
  2286  	// Note: we consciously use slices, not a map, to simulate a set.
  2287  	//       Typically, types have < 16 fields,
  2288  	//       and iteration using equality checks is faster than map lookups there
  2289  	flen := rt.NumField()
  2290  LOOP:
  2291  	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
  2292  		f := rt.Field(int(j))
  2293  		fkind := f.Type.Kind()
  2294  
  2295  		// skip if a func type, or is unexported, or structTag value == "-"
  2296  		switch fkind {
  2297  		case reflect.Func, reflect.UnsafePointer:
  2298  			continue LOOP
  2299  		}
  2300  
  2301  		isUnexported := f.PkgPath != ""
  2302  		if isUnexported && !f.Anonymous {
  2303  			continue
  2304  		}
  2305  		stag := x.structTag(f.Tag)
  2306  		if stag == "-" {
  2307  			continue
  2308  		}
  2309  		var si structFieldInfo
  2310  
  2311  		var numderef uint8 = 0
  2312  		for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() {
  2313  			numderef++
  2314  		}
  2315  
  2316  		var parsed bool
  2317  		// if anonymous and no struct tag (or it's blank),
  2318  		// and a struct (or pointer to struct), inline it.
  2319  		if f.Anonymous && fkind != reflect.Interface {
  2320  			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
  2321  			ft := f.Type
  2322  			isPtr := ft.Kind() == reflect.Ptr
  2323  			for ft.Kind() == reflect.Ptr {
  2324  				ft = ft.Elem()
  2325  			}
  2326  			isStruct := ft.Kind() == reflect.Struct
  2327  
  2328  			// Ignore embedded fields of unexported non-struct types.
  2329  			// Also, from go1.10, ignore pointers to unexported struct types
  2330  			// because unmarshal cannot assign a new struct to an unexported field.
  2331  			// See https://golang.org/issue/21357
  2332  			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
  2333  				continue
  2334  			}
  2335  			doInline := stag == ""
  2336  			if !doInline {
  2337  				si.parseTag(stag)
  2338  				parsed = true
  2339  				doInline = si.encName == "" // si.isZero()
  2340  			}
  2341  			if doInline && isStruct {
  2342  				// if etypes contains this, don't call rget again (as fields are already seen here)
  2343  				ftid := rt2id(ft)
  2344  				// We cannot recurse forever, but we need to track other field depths.
  2345  				// So - we break if we see a type twice (not the first time).
  2346  				// This should be sufficient to handle an embedded type that refers to its
  2347  				// owning type, which then refers to its embedded type.
  2348  				processIt := true
  2349  				numk := 0
  2350  				for _, k := range pv.etypes {
  2351  					if k == ftid {
  2352  						numk++
  2353  						if numk == rgetMaxRecursion {
  2354  							processIt = false
  2355  							break
  2356  						}
  2357  					}
  2358  				}
  2359  				if processIt {
  2360  					pv.etypes = append(pv.etypes, ftid)
  2361  					path2 := &structFieldInfoPathNode{
  2362  						parent:   path,
  2363  						typ:      f.Type,
  2364  						offset:   uint16(f.Offset),
  2365  						index:    j,
  2366  						kind:     uint8(fkind),
  2367  						numderef: numderef,
  2368  					}
  2369  					x.rget(ft, ftid, path2, pv, omitEmpty)
  2370  				}
  2371  				continue
  2372  			}
  2373  		}
  2374  
  2375  		// after the anonymous dance: if an unexported field, skip
  2376  		if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it
  2377  			continue
  2378  		}
  2379  
  2380  		si.path = structFieldInfoPathNode{
  2381  			parent:   path,
  2382  			typ:      f.Type,
  2383  			offset:   uint16(f.Offset),
  2384  			index:    j,
  2385  			kind:     uint8(fkind),
  2386  			numderef: numderef,
  2387  			// set asciiAlphaNum to true (default); checked and may be set to false below
  2388  			encNameAsciiAlphaNum: true,
  2389  			// note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward
  2390  			omitEmpty: si.path.omitEmpty,
  2391  		}
  2392  
  2393  		if !parsed {
  2394  			si.encName = f.Name
  2395  			si.parseTag(stag)
  2396  			parsed = true
  2397  		} else if si.encName == "" {
  2398  			si.encName = f.Name
  2399  		}
  2400  
  2401  		// si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName))
  2402  
  2403  		if omitEmpty {
  2404  			si.path.omitEmpty = true
  2405  		}
  2406  
  2407  		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
  2408  			if !asciiAlphaNumBitset.isset(si.encName[i]) {
  2409  				si.path.encNameAsciiAlphaNum = false
  2410  				break
  2411  			}
  2412  		}
  2413  
  2414  		pv.sfis = append(pv.sfis, si)
  2415  	}
  2416  }
  2417  
  2418  func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
  2419  	// return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
  2420  
  2421  	// if I's method is defined on T (ie T implements I), then *T implements I.
  2422  	// The converse is not true.
  2423  
  2424  	// Type.Implements can be expensive, as it does a simultaneous linear search across 2 lists
  2425  	// with alphanumeric string comparisons.
  2426  	// If we can avoid running one of these 2 calls, we should.
  2427  
  2428  	base = rt.Implements(iTyp)
  2429  	if base {
  2430  		indir = true
  2431  	} else {
  2432  		indir = reflect.PtrTo(rt).Implements(iTyp)
  2433  	}
  2434  	return
  2435  }
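        
        // Example (sketch): time.Time defines MarshalText on a value receiver, so the
        // base type implements encoding.TextMarshaler and the pointer check is skipped.
        //
        //	base, indir := implIntf(reflect.TypeOf(time.Time{}), textMarshalerTyp)
        //	// base == true, indir == true (implied by base, without a second Implements call)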
  2436  
  2437  func isSliceBoundsError(s string) bool {
  2438  	return strings.Contains(s, "index out of range") ||
  2439  		strings.Contains(s, "slice bounds out of range")
  2440  }
  2441  
  2442  func sprintf(format string, v ...interface{}) string {
  2443  	return fmt.Sprintf(format, v...)
  2444  }
  2445  
  2446  func panicValToErr(h errDecorator, v interface{}, err *error) {
  2447  	if v == *err {
  2448  		return
  2449  	}
  2450  	switch xerr := v.(type) {
  2451  	case nil:
  2452  	case runtime.Error:
  2453  		d, dok := h.(*Decoder)
  2454  		if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
  2455  			*err = io.EOF
  2456  		} else {
  2457  			h.wrapErr(xerr, err)
  2458  		}
  2459  	case error:
  2460  		switch xerr {
  2461  		case nil:
  2462  		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
  2463  			// treat as special (bubble up)
  2464  			*err = xerr
  2465  		default:
  2466  			h.wrapErr(xerr, err)
  2467  		}
  2468  	default:
  2469  		// we don't expect this to happen (as this library always panics with an error)
  2470  		h.wrapErr(fmt.Errorf("%v", v), err)
  2471  	}
  2472  }
  2473  
  2474  func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) {
  2475  	if slen <= 0 {
  2476  		return []byte{}, true
  2477  	}
  2478  	if cap(bs) < slen {
  2479  		return make([]byte, slen), true
  2480  	}
  2481  	return bs[:slen], false
  2482  }
  2483  
  2484  func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
  2485  	return mapKeyFastKindVals[k&31]
  2486  }
  2487  
  2488  // ----
  2489  
  2490  type codecFnInfo struct {
  2491  	ti     *typeInfo
  2492  	xfFn   Ext
  2493  	xfTag  uint64
  2494  	addrD  bool
  2495  	addrDf bool // force: if addrD, then decode function MUST take a ptr
  2496  	addrE  bool
  2497  	// addrEf bool // force: if addrE, then encode function MUST take a ptr
  2498  }
  2499  
  2500  // codecFn encapsulates the captured variables and the encode function.
  2501  // This way, we only do some calculations once, and pass to the
  2502  // code block that should be called (encapsulated in a function)
  2503  // instead of executing the checks every time.
  2504  type codecFn struct {
  2505  	i  codecFnInfo
  2506  	fe func(*Encoder, *codecFnInfo, reflect.Value)
  2507  	fd func(*Decoder, *codecFnInfo, reflect.Value)
  2508  	// _  [1]uint64 // padding (cache-aligned)
  2509  }
  2510  
  2511  type codecRtidFn struct {
  2512  	rtid uintptr
  2513  	fn   *codecFn
  2514  }
  2515  
  2516  func makeExt(ext interface{}) Ext {
  2517  	switch t := ext.(type) {
  2518  	case Ext:
  2519  		return t
  2520  	case BytesExt:
  2521  		return &bytesExtWrapper{BytesExt: t}
  2522  	case InterfaceExt:
  2523  		return &interfaceExtWrapper{InterfaceExt: t}
  2524  	}
  2525  	return &extFailWrapper{}
  2526  }
  2527  
  2528  func baseRV(v interface{}) (rv reflect.Value) {
  2529  	// use reflect.ValueOf, not rv4i; as of go 1.16beta, rv4i was not inlineable
  2530  	for rv = reflect.ValueOf(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
  2531  	}
  2532  	return
  2533  }
  2534  
  2535  // ----
  2536  
  2537  // these "checkOverflow" functions must be inlinable, and not call anybody.
  2538  // Overflow means that the value cannot be represented without wrapping/overflow.
  2539  // Overflow=false does not mean that the value can be represented without losing precision
  2540  // (especially for floating point).
  2541  
  2542  type checkOverflow struct{}
  2543  
  2544  func (checkOverflow) Float32(v float64) (overflow bool) {
  2545  	if v < 0 {
  2546  		v = -v
  2547  	}
  2548  	return math.MaxFloat32 < v && v <= math.MaxFloat64
  2549  }
  2550  func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
  2551  	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
  2552  		overflow = true
  2553  	}
  2554  	return
  2555  }
  2556  func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
  2557  	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
  2558  		overflow = true
  2559  	}
  2560  	return
  2561  }
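        
        // Example (worked): the shift round-trip keeps only the low `bitsize` bits,
        // so getting a different value back means the original did not fit.
        //
        //	checkOverflow{}.Int(300, 8) // true: (300<<56)>>56 == 44, not 300
        //	checkOverflow{}.Int(100, 8) // false: the round-trip returns 100 unchanged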
  2562  
  2563  func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
  2564  	return (neg && v > 1<<63) || (!neg && v >= 1<<63)
  2565  }
  2566  
  2567  func (checkOverflow) SignedInt(v uint64) (overflow bool) {
  2568  	// e.g. -128 to 127 for int8
  2569  	pos := (v >> 63) == 0
  2570  	ui2 := v & 0x7fffffffffffffff
  2571  	if pos {
  2572  		if ui2 > math.MaxInt64 {
  2573  			overflow = true
  2574  		}
  2575  	} else {
  2576  		if ui2 > math.MaxInt64-1 {
  2577  			overflow = true
  2578  		}
  2579  	}
  2580  	return
  2581  }
  2582  
  2583  func (x checkOverflow) Float32V(v float64) float64 {
  2584  	if x.Float32(v) {
  2585  		halt.errorf("float32 overflow: %v", v)
  2586  	}
  2587  	return v
  2588  }
  2589  func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
  2590  	if x.Uint(v, bitsize) {
  2591  		halt.errorf("uint64 overflow: %v", v)
  2592  	}
  2593  	return v
  2594  }
  2595  func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
  2596  	if x.Int(v, bitsize) {
  2597  		halt.errorf("int64 overflow: %v", v)
  2598  	}
  2599  	return v
  2600  }
  2601  func (x checkOverflow) SignedIntV(v uint64) int64 {
  2602  	if x.SignedInt(v) {
  2603  		halt.errorf("uint64 to int64 overflow: %v", v)
  2604  	}
  2605  	return int64(v)
  2606  }
  2607  
  2608  // ------------------ FLOATING POINT -----------------
  2609  
  2610  func isNaN64(f float64) bool { return f != f }
  2611  
  2612  func isWhitespaceChar(v byte) bool {
  2613  	// these are in order of speed below ...
  2614  
  2615  	return v < 33
  2616  	// return v < 33 && whitespaceCharBitset64.isset(v)
  2617  	// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
  2618  	// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
  2619  	// return whitespaceCharBitset.isset(v)
  2620  }
  2621  
  2622  func isNumberChar(v byte) bool {
  2623  	// these are in order of speed below ...
  2624  
  2625  	return numCharBitset.isset(v)
  2626  	// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
  2627  	// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
  2628  }
  2629  
  2630  // -----------------------
  2631  
  2632  type ioFlusher interface {
  2633  	Flush() error
  2634  }
  2635  
  2636  type ioBuffered interface {
  2637  	Buffered() int
  2638  }
  2639  
  2640  // -----------------------
  2641  
  2642  type sfiRv struct {
  2643  	v *structFieldInfo
  2644  	r reflect.Value
  2645  }
  2646  
  2647  // ------
  2648  
  2649  // bitset types are better than [256]bool, because they permit the whole
  2650  // bitset array being on a single cache line and use less memory.
  2651  //
  2652  // Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap).
  2653  //
  2654  // We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduced
  2655  // bounds checking, so we discarded them, and everyone uses bitset256.
  2656  //
  2657  // given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
  2658  // consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
  2659  //
  2660  // Note that using >> or & is faster than using / or %, as division is quite expensive if not optimized.
  2661  
  2662  // MARKER:
  2663  // We noticed a little performance degradation when using bitset256 as [32]byte (or bitset32 as uint32).
  2664  // For example, json encoding went from 188K ns/op to 168K ns/op (~ 10% reduction).
  2665  // Consequently, we are using a [NNN]bool for bitsetNNN.
  2666  // To eliminate bounds-checking, we index with pos % N (as pos & (N-1)), which is guaranteed to be within bounds.
  2667  
  2668  // ----
  2669  type bitset32 [32]bool
  2670  
  2671  func (x *bitset32) set(pos byte) *bitset32 {
  2672  	x[pos&31] = true // x[pos%32] = true
  2673  	return x
  2674  }
  2675  func (x *bitset32) isset(pos byte) bool {
  2676  	return x[pos&31] // x[pos%32]
  2677  }
  2678  
  2679  type bitset256 [256]bool
  2680  
  2681  func (x *bitset256) set(pos byte) *bitset256 {
  2682  	x[pos] = true
  2683  	return x
  2684  }
  2685  func (x *bitset256) isset(pos byte) bool {
  2686  	return x[pos]
  2687  }
  2688  
  2689  // ------------
  2690  
  2691  type panicHdl struct{}
  2692  
  2693  // onerror will panic if err is defined (not nil)
  2694  func (panicHdl) onerror(err error) {
  2695  	if err != nil {
  2696  		panic(err)
  2697  	}
  2698  }
  2699  
  2700  // errorf will always panic, using the parameters passed.
  2701  //
  2702  // Note: it is ok to pass in a stringView, as it will just pass it directly
  2703  // to a fmt.Sprintf call and not hold onto it.
  2704  //
  2705  //go:noinline
  2706  func (panicHdl) errorf(format string, params ...interface{}) {
  2707  	if format == "" {
  2708  		panic(errPanicUndefined)
  2709  	}
  2710  	if len(params) == 0 {
  2711  		panic(errors.New(format))
  2712  	}
  2713  	panic(fmt.Errorf(format, params...))
  2714  }
  2715  
  2716  // ----------------------------------------------------
  2717  
  2718  type errDecorator interface {
  2719  	wrapErr(in error, out *error)
  2720  }
  2721  
  2722  type errDecoratorDef struct{}
  2723  
  2724  func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }
  2725  
  2726  // ----------------------------------------------------
  2727  
  2728  type mustHdl struct{}
  2729  
  2730  func (mustHdl) String(s string, err error) string {
  2731  	halt.onerror(err)
  2732  	return s
  2733  }
  2734  func (mustHdl) Int(s int64, err error) int64 {
  2735  	halt.onerror(err)
  2736  	return s
  2737  }
  2738  func (mustHdl) Uint(s uint64, err error) uint64 {
  2739  	halt.onerror(err)
  2740  	return s
  2741  }
  2742  func (mustHdl) Float(s float64, err error) float64 {
  2743  	halt.onerror(err)
  2744  	return s
  2745  }
  2746  
  2747  // -------------------
  2748  
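        // freelistCapacity returns the smallest power of 2 strictly greater than
        // length, with a floor of 8: e.g. 3 -> 8, 8 -> 16, 10 -> 16.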
  2749  func freelistCapacity(length int) (capacity int) {
  2750  	for capacity = 8; capacity <= length; capacity *= 2 {
  2751  	}
  2752  	return
  2753  }
  2754  
  2755  // bytesFreelist is a list of byte buffers, sorted by cap.
  2756  //
  2757  // In anecdotal testing (running go test -tsd 1..6), we couldn't get
  2758  // the length of the list > 4 at any time. So we believe a linear search
  2759  // without bounds checking is sufficient.
  2760  //
  2761  // Typical usage model:
  2762  //   peek may go together with put, iff pop=true. peek gets largest byte slice temporarily.
  2763  //   check is used to switch a []byte if necessary
  2764  //   get/put go together
  2765  //
  2766  // Given that folks may get a []byte, and then append to it a lot (which may re-allocate
  2767  // a new []byte), we should try to return both to the list (the one gotten from blist and the newly allocated one).
  2768  //
  2769  // Typical usage model for get/put, when we don't know whether we may need more than requested
  2770  //   v0 := blist.get()
  2771  //   v1 := v0
  2772  //   ... use v1 ...
  2773  //   blist.put(v1)
  2774  //   if byteSliceAddr(v0) != byteSliceAddr(v1) {
  2775  //     blist.put(v0)
  2776  //   }
  2777  //
  2778  type bytesFreelist [][]byte
  2779  
  2780  // peek returns a slice of possibly non-zero'ed bytes, with len=0,
  2781  // and with the largest capacity from the list.
  2782  func (x *bytesFreelist) peek(length int, pop bool) (out []byte) {
  2783  	if bytesFreeListNoCache {
  2784  		return make([]byte, 0, freelistCapacity(length))
  2785  	}
  2786  	y := *x
  2787  	if len(y) > 0 {
  2788  		out = y[len(y)-1]
  2789  	}
  2790  	// start buf with a minimum of 64 bytes
  2791  	const minLenBytes = 64
  2792  	if length < minLenBytes {
  2793  		length = minLenBytes
  2794  	}
  2795  	if cap(out) < length {
  2796  		out = make([]byte, 0, freelistCapacity(length))
  2797  		y = append(y, out)
  2798  		*x = y
  2799  	}
  2800  	if pop && len(y) > 0 {
  2801  		y = y[:len(y)-1]
  2802  		*x = y
  2803  	}
  2804  	return
  2805  }
  2806  
  2807  // get returns a slice of possibly non-zero'ed bytes, with len=0,
  2808  // and with cap >= length requested.
  2809  func (x *bytesFreelist) get(length int) (out []byte) {
  2810  	if bytesFreeListNoCache {
  2811  		return make([]byte, 0, freelistCapacity(length))
  2812  	}
  2813  	y := *x
  2814  	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
  2815  	// for i, v := range y {
  2816  	for i := 0; i < len(y); i++ {
  2817  		v := y[i]
  2818  		if cap(v) >= length {
  2819  			// *x = append(y[:i], y[i+1:]...)
  2820  			copy(y[i:], y[i+1:])
  2821  			*x = y[:len(y)-1]
  2822  			return v
  2823  		}
  2824  	}
  2825  	return make([]byte, 0, freelistCapacity(length))
  2826  }
  2827  
  2828  func (x *bytesFreelist) put(v []byte) {
  2829  	if bytesFreeListNoCache || cap(v) == 0 {
  2830  		return
  2831  	}
  2832  	if len(v) != 0 {
  2833  		v = v[:0]
  2834  	}
  2835  	// append the new value, then try to put it in a better position
  2836  	y := append(*x, v)
  2837  	*x = y
  2838  	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
  2839  	// for i, z := range y[:len(y)-1] {
  2840  	for i := 0; i < len(y)-1; i++ {
  2841  		z := y[i]
  2842  		if cap(z) > cap(v) {
  2843  			copy(y[i+1:], y[i:])
  2844  			y[i] = v
  2845  			return
  2846  		}
  2847  	}
  2848  }
  2849  
  2850  func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
  2851  	// ensure inlineable, by moving slow-path out to its own function
  2852  	if cap(v) >= length {
  2853  		return v[:0]
  2854  	}
  2855  	return x.checkPutGet(v, length)
  2856  }
  2857  
  2858  func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte {
  2859  	// checkPutGet broken out into its own function, so check is inlineable in general case
  2860  	const useSeparateCalls = false
  2861  
  2862  	if useSeparateCalls {
  2863  		x.put(v)
  2864  		return x.get(length)
  2865  	}
  2866  
  2867  	if bytesFreeListNoCache {
  2868  		return make([]byte, 0, freelistCapacity(length))
  2869  	}
  2870  
  2871  	// assume cap(v) < length, so put must happen before get
  2872  	y := *x
  2873  	var put = cap(v) == 0 // if empty, consider it already put
  2874  	if !put {
  2875  		y = append(y, v)
  2876  		*x = y
  2877  	}
  2878  	for i := 0; i < len(y); i++ {
  2879  		z := y[i]
  2880  		if put {
  2881  			if cap(z) >= length {
  2882  				copy(y[i:], y[i+1:])
  2883  				y = y[:len(y)-1]
  2884  				*x = y
  2885  				return z
  2886  			}
  2887  		} else {
  2888  			if cap(z) > cap(v) {
  2889  				copy(y[i+1:], y[i:])
  2890  				y[i] = v
  2891  				put = true
  2892  			}
  2893  		}
  2894  	}
  2895  	return make([]byte, 0, freelistCapacity(length))
  2896  }
  2897  
  2898  // -------------------------
  2899  
  2900  // sfiRvFreelist is used by Encoder for encoding structs,
  2901  // where we have to gather the fields first and then
  2902  // analyze them for omitEmpty, before knowing the length of the array/map to encode.
  2903  //
  2904  // Typically, the length here will depend on the number of cycles e.g.
  2905  // if type T1 has reference to T1, or T1 has reference to type T2 which has reference to T1.
  2906  //
  2907  // In the general case, the length of this list is 1 most of the time,
  2908  // so linear search is fine.
  2909  type sfiRvFreelist [][]sfiRv
  2910  
  2911  func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
  2912  	y := *x
  2913  
  2914  	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
  2915  	// for i, v := range y {
  2916  	for i := 0; i < len(y); i++ {
  2917  		v := y[i]
  2918  		if cap(v) >= length {
  2919  			// *x = append(y[:i], y[i+1:]...)
  2920  			copy(y[i:], y[i+1:])
  2921  			*x = y[:len(y)-1]
  2922  			return v
  2923  		}
  2924  	}
  2925  	return make([]sfiRv, 0, freelistCapacity(length))
  2926  }
  2927  
  2928  func (x *sfiRvFreelist) put(v []sfiRv) {
  2929  	if len(v) != 0 {
  2930  		v = v[:0]
  2931  	}
  2932  	// append the new value, then try to put it in a better position
  2933  	y := append(*x, v)
  2934  	*x = y
  2935  	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
  2936  	// for i, z := range y[:len(y)-1] {
  2937  	for i := 0; i < len(y)-1; i++ {
  2938  		z := y[i]
  2939  		if cap(z) > cap(v) {
  2940  			copy(y[i+1:], y[i:])
  2941  			y[i] = v
  2942  			return
  2943  		}
  2944  	}
  2945  }
  2946  
  2947  // ---- multiple interner implementations ----
  2948  
  2949  // Hard to tell which is most performant:
  2950  //   - use a map[string]string - worst perf, no collisions, and unlimited entries
  2951  //   - use a linear search with move to front heuristics - no collisions, and maxed at 64 entries
  2952  //   - use a computationally-intensive hash - best performance, some collisions, maxed at 64 entries
  2953  
  2954  const (
  2955  	internMaxStrLen = 16     // if more than 16 bytes, faster to copy than compare bytes
  2956  	internCap       = 64 * 2 // 64 uses 1K bytes RAM, so 128 (anecdotal sweet spot) uses 2K bytes
  2957  )
  2958  
  2959  type internerMap map[string]string
  2960  
  2961  func (x *internerMap) init() {
  2962  	*x = make(map[string]string, internCap)
  2963  }
  2964  
  2965  func (x internerMap) string(v []byte) (s string) {
  2966  	s, ok := x[string(v)] // no allocation here, per go implementation
  2967  	if !ok {
  2968  		s = string(v) // new allocation here
  2969  		x[s] = s
  2970  	}
  2971  	return
  2972  }
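        
        // Example (sketch): interning repeated keys while decoding, so equal []byte
        // inputs share a single allocated string.
        //
        //	var in internerMap
        //	in.init()
        //	k1 := in.string([]byte("name")) // first time: allocates "name"
        //	k2 := in.string([]byte("name")) // hit: k1 == k2, no new allocation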