github.com/patricebensoussan/go/codec@v1.2.99/helper_unsafe.go (about)

     1  // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
     2  // Use of this source code is governed by a MIT license found in the LICENSE file.
     3  
     4  //go:build !safe && !codec.safe && !appengine && go1.9
     5  // +build !safe,!codec.safe,!appengine,go1.9
     6  
     7  // minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
     8  // - typedmemclr was introduced in go 1.8
     9  // - mapassign_fastXXX was introduced in go 1.9
    10  // etc
    11  
    12  package codec
    13  
    14  import (
    15  	"reflect"
    16  	_ "runtime" // needed for go linkname(s)
    17  	"sync/atomic"
    18  	"time"
    19  	"unsafe"
    20  )
    21  
    22  // This file has unsafe variants of some helper functions.
    23  // MARKER: See helper_unsafe.go for the usage documentation.
    24  
    25  // There are a number of helper_*unsafe*.go files.
    26  //
    27  // - helper_unsafe
    28  //   unsafe variants of dependent functions
    29  // - helper_unsafe_compiler_gc (gc)
    30  //   unsafe variants of dependent functions which cannot be shared with gollvm or gccgo
    31  // - helper_not_unsafe_not_gc (gccgo/gollvm or safe)
    32  //   safe variants of functions in helper_unsafe_compiler_gc
    33  // - helper_not_unsafe (safe)
    34  //   safe variants of functions in helper_unsafe
    35  // - helper_unsafe_compiler_not_gc (gccgo, gollvm)
    36  //   unsafe variants of functions/variables which non-standard compilers need
    37  //
    38  // This way, we can judiciously use build tags to include the right set of files
    39  // for any compiler, and make it run optimally in unsafe mode.
    40  //
    41  // As of March 2021, we cannot differentiate whether running with gccgo or gollvm
    42  // using a build constraint, as both satisfy 'gccgo' build tag.
    43  // Consequently, we must use the lowest common denominator to support both.
    44  
    45  // For reflect.Value code, we decided to do the following:
    46  //    - if we know the kind, we can elide conditional checks for
    47  //      - SetXXX (Int, Uint, String, Bool, etc)
    48  //      - SetLen
    49  //
    50  // We can also optimize
    51  //      - IsNil
    52  
    53  // MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
    54  //   - rvCopySlice:      decode calls it if rvGrowSlice didn't set the new slice into the pointer to the orig slice.
    55  //                       however, helper_unsafe sets it, so there's no need to call rvCopySlice later
    56  //   - rvSlice:          same as above
    57  //   - rvGetArray4Bytes: only called within kArray for []byte, but that is now handled
    58  //                       within the fast-path directly
    59  
// safeMode is false in this build: the unsafe helper variants are active.
const safeMode = false

// helperUnsafeDirectAssignMapEntry says that we should not copy the pointer in the map
// to another value during mapRange/iteration and mapGet calls, but directly assign it.
//
// The only caller of mapRange/iteration is encode.
// Here, we just walk through the values and encode them.
//
// The only caller of mapGet is decode.
// Here, it does a Get if the underlying value is a pointer, and decodes into that.
//
// For both users, we are very careful NOT to modify or keep the pointers around.
// Consequently, it is ok to take advantage of the fact that the map is not modified
// during an iteration, and we can just "peek" at the internal value in the map and use it.
const helperUnsafeDirectAssignMapEntry = true

// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
//
// These mirror the private flag bits in reflect.Value's flag word.
const (
	unsafeFlagStickyRO = 1 << 5
	unsafeFlagEmbedRO  = 1 << 6
	unsafeFlagIndir    = 1 << 7
	unsafeFlagAddr     = 1 << 8
	unsafeFlagRO       = unsafeFlagStickyRO | unsafeFlagEmbedRO
	// unsafeFlagKindMask = (1 << 5) - 1 // 5 bits for 27 kinds (up to 31)
	// unsafeTypeKindDirectIface = 1 << 5
)

// transientSizeMax below is used in TransientAddr as the backing storage.
//
// Must be >= 16 as the maximum size is a complex128 (or string on 64-bit machines).
const transientSizeMax = 64

// transientValueHasStringSlice: should struct/array transient values support
// internal strings and slices? Disabled.
const transientValueHasStringSlice = false
    94  
// unsafeString mirrors the runtime layout of a string header.
// MARKER: keep in sync with the runtime's string representation.
type unsafeString struct {
	Data unsafe.Pointer
	Len  int
}

// unsafeSlice mirrors the runtime layout of a slice header.
// Note that it is a superset of unsafeString (Data and Len line up),
// which some helpers below rely on.
type unsafeSlice struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

// unsafeIntf mirrors the 2-word runtime layout of an interface value:
// a type-descriptor pointer followed by a data pointer.
type unsafeIntf struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

// unsafeReflectValue mirrors the layout of reflect.Value:
// the interface words plus the flag word.
type unsafeReflectValue struct {
	unsafeIntf
	flag uintptr
}

// unsafeRuntimeType exposes the leading size field of a runtime
// type descriptor; the remaining fields are not needed here.
// keep in sync with stdlib runtime/type.go
type unsafeRuntimeType struct {
	size uintptr
	// ... many other fields here
}

// unsafeZeroAddr and unsafeZeroSlice points to a read-only block of memory
// used for setting a zero value for most types or creating a read-only
// zero value for a given type.
var (
	unsafeZeroAddr  = unsafe.Pointer(&unsafeZeroArr[0])
	unsafeZeroSlice = unsafeSlice{unsafeZeroAddr, 0, 0}
)
   129  
// We use a scratch memory and an unsafeSlice for transient values:
//
// unsafeSlice is used for standalone strings and slices (outside an array or struct).
// scratch memory is used for other kinds, based on contract below:
// - numbers, bool are always transient
// - structs and arrays are transient iff they have no pointers i.e.
//   no string, slice, chan, func, interface, map, etc only numbers and bools.
// - slices and strings are transient (using the unsafeSlice)

// unsafePerTypeElem is the scratch storage backing one transient value.
type unsafePerTypeElem struct {
	arr   [transientSizeMax]byte // for bool, number, struct, array kinds
	slice unsafeSlice            // for string and slice kinds
}
   143  
   144  func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
   145  	if k == reflect.String || k == reflect.Slice {
   146  		x.slice = unsafeSlice{} // memclr
   147  		return unsafe.Pointer(&x.slice)
   148  	}
   149  	x.arr = [transientSizeMax]byte{} // memclr
   150  	return unsafe.Pointer(&x.arr)
   151  }
   152  
// perType holds two scratch elements, so two transient values can be
// live at the same time (e.g. a map key and its value during decode).
type perType struct {
	elems [2]unsafePerTypeElem
}

// decPerType is the decoder's per-type transient scratch state.
type decPerType struct {
	perType
}

// encPerType is a stateless marker type used on the encode path.
type encPerType struct{}
   162  
// TransientAddrK is used for getting a *transient* value to be decoded into,
// which will right away be used for something else.
//
// See notes in helper.go about "Transient values during decoding"

func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[0].addrFor(k))
}

// TransientAddr2K is like TransientAddrK but uses the second scratch slot,
// so it may be used while a TransientAddrK value is still live.
func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[1].addrFor(k))
}

// AddressableRO returns a read-only addressable view of v, for encoding.
func (encPerType) AddressableRO(v reflect.Value) reflect.Value {
	return rvAddressableReadonly(v)
}
   179  
   180  // stringView returns a view of the []byte as a string.
   181  // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
   182  // In regular safe mode, it is an allocation and copy.
   183  func stringView(v []byte) string {
   184  	return *(*string)(unsafe.Pointer(&v))
   185  }
   186  
   187  // bytesView returns a view of the string as a []byte.
   188  // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
   189  // In regular safe mode, it is an allocation and copy.
   190  func bytesView(v string) (b []byte) {
   191  	sx := (*unsafeString)(unsafe.Pointer(&v))
   192  	bx := (*unsafeSlice)(unsafe.Pointer(&b))
   193  	bx.Data, bx.Len, bx.Cap = sx.Data, sx.Len, sx.Len
   194  	return
   195  }
   196  
   197  func byteSliceSameData(v1 []byte, v2 []byte) bool {
   198  	return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
   199  }
   200  
   201  // MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
   202  // These functions expect that the bounds are valid, and have been checked before this is called.
   203  // copy(...) does a number of checks which are unnecessary in this situation when in bounds.
   204  
   205  func okBytes3(b []byte) (v [4]byte) {
   206  	*(*[3]byte)(unsafe.Pointer(&v[1])) = *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
   207  	return
   208  }
   209  
   210  func okBytes4(b []byte) [4]byte {
   211  	return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
   212  }
   213  
   214  func okBytes8(b []byte) [8]byte {
   215  	return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
   216  }
   217  
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
	var ui = (*unsafeIntf)(unsafe.Pointer(&v))
	isnil = ui.ptr == nil
	if !isnil {
		// data word is non-nil, but the boxed value may still be a
		// nil interface or nil slice - check via reflection.
		rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
	}
	return
}

// unsafeIsNilIntfOrSlice reports whether v (whose interface data word ui.ptr
// is known non-nil) holds a nil interface or nil slice, by dereferencing the
// first word of the boxed payload. rv is returned so the caller does not
// need to call reflect.ValueOf again.
func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
	rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
	tk := rv.Kind()
	isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
	return
}

// return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
//
// Assumes that v is a reference (map/func/chan/ptr/func)
func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
	if v.flag&unsafeFlagIndir != 0 {
		// stored indirectly: load the actual reference word
		return *(*unsafe.Pointer)(v.ptr)
	}
	return v.ptr
}
   247  
   248  func eq4i(i0, i1 interface{}) bool {
   249  	v0 := (*unsafeIntf)(unsafe.Pointer(&i0))
   250  	v1 := (*unsafeIntf)(unsafe.Pointer(&i1))
   251  	return v0.typ == v1.typ && v0.ptr == v1.ptr
   252  }
   253  
// rv4iptr returns a reflect.Value of kind Ptr wrapping the (non-nil)
// pointer boxed in i, built by hand from the interface words.
func rv4iptr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindPtr)
	return
}

// rv4istr returns a reflect.Value of kind String wrapping the (non-nil)
// string boxed in i, built by hand from the interface words.
func rv4istr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	// strings are stored indirectly in an interface, hence flagIndir
	uv.flag = uintptr(rkindString) | unsafeFlagIndir
	return
}
   269  
// rv2i returns the interface{} held by rv, without going through
// reflect.Value.Interface() (and its allocation/checks).
func rv2i(rv reflect.Value) (i interface{}) {
	// We tap into implementation details from
	// the source go stdlib reflect/value.go, and trim the implementation.
	//
	// e.g.
	// - a map/ptr is a reference,        thus flagIndir is not set on it
	// - an int/slice is not a reference, thus flagIndir is set on it

	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
		// a reference kind stored indirectly: load the actual reference word
		// so the interface data word is the reference itself.
		urv.ptr = *(*unsafe.Pointer)(urv.ptr)
	}
	return *(*interface{})(unsafe.Pointer(&urv.unsafeIntf))
}
   284  
// rvAddr is rv.Addr() without the addressability checks, stamping
// ptrType as the type of the returned pointer value.
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// keep only the read-only bits; kind becomes Ptr, indir/addr are cleared
	urv.flag = (urv.flag & unsafeFlagRO) | uintptr(reflect.Ptr)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&ptrType))).ptr
	return rv
}

// rvIsNil is rv.IsNil() without the kind checks.
func rvIsNil(rv reflect.Value) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if urv.flag&unsafeFlagIndir != 0 {
		return *(*unsafe.Pointer)(urv.ptr) == nil
	}
	return urv.ptr == nil
}

// rvSetSliceLen sets the length of the slice held by rv in place
// (rv.SetLen without the checks). A slice header is a superset of a
// string header, so the Len field can be written via unsafeString.
func rvSetSliceLen(rv reflect.Value, length int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	(*unsafeString)(urv.ptr).Len = length
}

// rvZeroAddrK returns an addressable zero value of type t (kind k),
// backed by freshly allocated memory.
func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = unsafeNew(urv.typ)
	return
}
   312  
// rvZeroAddrTransientAnyK is like rvZeroAddrK, but backed by the
// caller-supplied (already zeroed) transient scratch memory at addr.
func rvZeroAddrTransientAnyK(t reflect.Type, k reflect.Kind, addr unsafe.Pointer) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = addr
	return
}

// rvZeroK returns a non-addressable zero value of type t (kind k),
// sharing the read-only zero block where possible to avoid allocation.
func rvZeroK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	if refBitset.isset(byte(k)) {
		// reference kinds: the zero value is a nil data word, no backing memory
		urv.flag = uintptr(k)
	} else if rtsize2(urv.typ) <= uintptr(len(unsafeZeroArr)) {
		// small enough to alias the shared read-only zero block
		urv.flag = uintptr(k) | unsafeFlagIndir
		urv.ptr = unsafeZeroAddr
	} else { // meaning struct or array
		urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
   335  
// rvConvert will convert a value to a different type directly,
// ensuring that they still point to the same underlying value.
func rvConvert(v reflect.Value, t reflect.Type) reflect.Value {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	return v
}

// rvAddressableReadonly returns an addressable reflect.Value.
//
// Use it within encode calls, when you just want to "read" the underlying ptr
// without modifying the value.
//
// Note that it cannot be used for r/w use, as those non-addressable values
// may have been stored in read-only memory, and trying to write the pointer
// may cause a segfault.
func rvAddressableReadonly(v reflect.Value) reflect.Value {
	// hack to make an addressable value out of a non-addressable one.
	// Assume folks calling it are passing a value that can be addressable, but isn't.
	// This assumes that the flagIndir is already set on it.
	// so we just set the flagAddr bit on the flag (and do not set the flagIndir).

	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uv.flag | unsafeFlagAddr // | unsafeFlagIndir

	return v
}

// rtsize2 returns the size of the runtime type whose descriptor rt points to.
func rtsize2(rt unsafe.Pointer) uintptr {
	return ((*unsafeRuntimeType)(rt)).size
}
   367  
// rt2id returns the runtime type id for rt: the address of its runtime
// type descriptor (the data word of the reflect.Type interface value).
func rt2id(rt reflect.Type) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).ptr)
}
   371  
   372  func i2rtid(i interface{}) uintptr {
   373  	return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
   374  }
   375  
// --------------------------

// unsafeCmpZero reports whether the size bytes at ptr are all zero.
// It compares against the shared zero block, or against a freshly
// allocated zeroed buffer when size exceeds that block.
func unsafeCmpZero(ptr unsafe.Pointer, size int) bool {
	var s1 = unsafeString{ptr, size}
	var s2 = unsafeString{unsafeZeroAddr, size}
	if size > len(unsafeZeroArr) {
		arr := make([]byte, size)
		s2.Data = unsafe.Pointer(&arr[0])
	}
	// string comparison compiles down to a memcmp over the two spans
	return *(*string)(unsafe.Pointer(&s1)) == *(*string)(unsafe.Pointer(&s2)) // memcmp
}

// isEmptyValue reports whether v holds its type's zero/empty value.
// If recursive, it walks kinds (following pointers/interfaces) via
// isEmptyValueFallbackRecur; otherwise it does a raw memory compare
// of the value's bytes against zero.
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if urv.flag == 0 {
		// flag==0 means an invalid (zero) reflect.Value
		return true
	}
	if recursive {
		return isEmptyValueFallbackRecur(urv, v, tinfos)
	}
	return unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
   398  
// isEmptyValueFallbackRecur is the kind-by-kind implementation behind
// isEmptyValue(recursive=true): scalars are compared to zero directly
// through urv.ptr, strings/slices by length, structs by a memory
// compare over their full size, and pointers/interfaces are followed
// into their element.
func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos *TypeInfos) bool {
	const recursive = true

	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.String:
		return (*unsafeString)(urv.ptr).Len == 0
	case reflect.Slice:
		return (*unsafeSlice)(urv.ptr).Len == 0
	case reflect.Bool:
		return !*(*bool)(urv.ptr)
	case reflect.Int:
		return *(*int)(urv.ptr) == 0
	case reflect.Int8:
		return *(*int8)(urv.ptr) == 0
	case reflect.Int16:
		return *(*int16)(urv.ptr) == 0
	case reflect.Int32:
		return *(*int32)(urv.ptr) == 0
	case reflect.Int64:
		return *(*int64)(urv.ptr) == 0
	case reflect.Uint:
		return *(*uint)(urv.ptr) == 0
	case reflect.Uint8:
		return *(*uint8)(urv.ptr) == 0
	case reflect.Uint16:
		return *(*uint16)(urv.ptr) == 0
	case reflect.Uint32:
		return *(*uint32)(urv.ptr) == 0
	case reflect.Uint64:
		return *(*uint64)(urv.ptr) == 0
	case reflect.Uintptr:
		return *(*uintptr)(urv.ptr) == 0
	case reflect.Float32:
		return *(*float32)(urv.ptr) == 0
	case reflect.Float64:
		return *(*float64)(urv.ptr) == 0
	case reflect.Complex64:
		return unsafeCmpZero(urv.ptr, 8)
	case reflect.Complex128:
		return unsafeCmpZero(urv.ptr, 16)
	case reflect.Struct:
		// return isEmptyStruct(v, tinfos, recursive)
		if tinfos == nil {
			tinfos = defTypeInfos
		}
		ti := tinfos.find(uintptr(urv.typ))
		if ti == nil {
			ti = tinfos.load(rvType(v))
		}
		// compare the struct's full memory footprint against zero
		return unsafeCmpZero(urv.ptr, int(ti.size))
	case reflect.Interface, reflect.Ptr:
		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
		if recursive && !isnil {
			return isEmptyValue(v.Elem(), tinfos, recursive)
		}
		return isnil
	case reflect.UnsafePointer:
		return urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
	case reflect.Chan:
		return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
	case reflect.Map:
		return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
	case reflect.Array:
		return v.Len() == 0
	}
	return false
}
   469  
// --------------------------

// structFieldInfos stores the source and sorted []*structFieldInfo as
// raw data pointers with a single shared length, so the two views can
// be rebuilt on demand without keeping two slice headers around.
type structFieldInfos struct {
	c      unsafe.Pointer // source
	s      unsafe.Pointer // sorted
	length int
}

// load captures the data pointers and (common) length of the source
// and sorted slices. Both slices must have the same length.
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
	s := (*unsafeSlice)(unsafe.Pointer(&sorted))
	x.s = s.Data
	x.length = s.Len
	s = (*unsafeSlice)(unsafe.Pointer(&source))
	x.c = s.Data
}

// sorted rebuilds the sorted []*structFieldInfo view from the stored
// data pointer and length.
func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
	return
}

// source rebuilds the source-order []*structFieldInfo view from the
// stored data pointer and length.
func (x *structFieldInfos) source() (v []*structFieldInfo) {
	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.c, x.length, x.length}
	return
}
   499  
// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.

// ----------------------

// atomicTypeInfoSlice atomically publishes a []rtid2ti via a single pointer.
type atomicTypeInfoSlice struct {
	v unsafe.Pointer // *[]rtid2ti
}

// load returns the last stored slice (nil if nothing stored yet).
func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]rtid2ti)(x2)
	}
	return
}

// store atomically publishes p (via a pointer to a copy of its header).
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}

// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.

// --------------------------

// atomicRtidFnSlice atomically publishes a []codecRtidFn via a single pointer.
type atomicRtidFnSlice struct {
	v unsafe.Pointer // *[]codecRtidFn
}

// load returns the last stored slice (nil if nothing stored yet).
func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]codecRtidFn)(x2)
	}
	return
}

// store atomically publishes p (via a pointer to a copy of its header).
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}

// --------------------------

// atomicClsErr atomically publishes a clsErr via a single pointer.
type atomicClsErr struct {
	v unsafe.Pointer // *clsErr
}

// load returns a copy of the last stored clsErr (zero value if none).
func (x *atomicClsErr) load() (e clsErr) {
	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
	if x2 != nil {
		e = *x2
	}
	return
}

// store atomically publishes a copy of p.
func (x *atomicClsErr) store(p clsErr) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
   561  
// --------------------------

// to create a reflect.Value for each member field of fauxUnion,
// we first create a global fauxUnion, and create reflect.Value
// for them all.
// This way, we have the flags and type in the reflect.Value.
// Then, when a reflect.Value is called, we just copy it,
// update the ptr to the fauxUnion's, and return it.

// unsafeDecNakedWrapper pairs a fauxUnion with template reflect.Values
// for its primitive fields; the templates carry the right typ+flag
// words, and only the ptr word is swapped per call (see the
// fauxUnion.rX accessors below).
type unsafeDecNakedWrapper struct {
	fauxUnion
	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
}

// init builds the template reflect.Values, one per primitive field.
func (n *unsafeDecNakedWrapper) init() {
	n.ru = rv4iptr(&n.u).Elem()
	n.ri = rv4iptr(&n.i).Elem()
	n.rf = rv4iptr(&n.f).Elem()
	n.rl = rv4iptr(&n.l).Elem()
	n.rs = rv4iptr(&n.s).Elem()
	n.rt = rv4iptr(&n.t).Elem()
	n.rb = rv4iptr(&n.b).Elem()
	// n.rr[] = reflect.ValueOf(&n.)
}

// defUnsafeDecNakedWrapper is the global template source for the
// fauxUnion.rX accessors; initialized once at package init.
var defUnsafeDecNakedWrapper unsafeDecNakedWrapper

func init() {
	defUnsafeDecNakedWrapper.init()
}

// Each fauxUnion.rX accessor below copies the corresponding template
// reflect.Value and repoints its data word at this fauxUnion's field.

func (n *fauxUnion) ru() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ru
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
	return
}
func (n *fauxUnion) ri() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ri
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
	return
}
func (n *fauxUnion) rf() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rf
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
	return
}
func (n *fauxUnion) rl() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rl
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
	return
}
func (n *fauxUnion) rs() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rs
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
	return
}
func (n *fauxUnion) rt() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rt
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
	return
}
func (n *fauxUnion) rb() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rb
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
	return
}
   628  
// --------------------------

// Each rvSetXxx below writes v directly into rv's backing memory:
// the equivalent of rv.SetXxx without any kind/addressability checks.
// rv MUST be an addressable value of the matching kind, with
// flagIndir set (urv.ptr is the address of the value).

func rvSetBytes(rv reflect.Value, v []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*[]byte)(urv.ptr) = v
}

func rvSetString(rv reflect.Value, v string) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*string)(urv.ptr) = v
}

func rvSetBool(rv reflect.Value, v bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*bool)(urv.ptr) = v
}

func rvSetTime(rv reflect.Value, v time.Time) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*time.Time)(urv.ptr) = v
}

func rvSetFloat32(rv reflect.Value, v float32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*float32)(urv.ptr) = v
}

func rvSetFloat64(rv reflect.Value, v float64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*float64)(urv.ptr) = v
}

func rvSetComplex64(rv reflect.Value, v complex64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*complex64)(urv.ptr) = v
}

func rvSetComplex128(rv reflect.Value, v complex128) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*complex128)(urv.ptr) = v
}

func rvSetInt(rv reflect.Value, v int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int)(urv.ptr) = v
}

func rvSetInt8(rv reflect.Value, v int8) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int8)(urv.ptr) = v
}

func rvSetInt16(rv reflect.Value, v int16) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int16)(urv.ptr) = v
}

func rvSetInt32(rv reflect.Value, v int32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int32)(urv.ptr) = v
}

func rvSetInt64(rv reflect.Value, v int64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int64)(urv.ptr) = v
}

func rvSetUint(rv reflect.Value, v uint) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint)(urv.ptr) = v
}

func rvSetUintptr(rv reflect.Value, v uintptr) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uintptr)(urv.ptr) = v
}

func rvSetUint8(rv reflect.Value, v uint8) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint8)(urv.ptr) = v
}

func rvSetUint16(rv reflect.Value, v uint16) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint16)(urv.ptr) = v
}

func rvSetUint32(rv reflect.Value, v uint32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint32)(urv.ptr) = v
}

func rvSetUint64(rv reflect.Value, v uint64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint64)(urv.ptr) = v
}
   724  
// ----------------

// rvSetZero is rv.Set(reflect.Zero(rv.Type()) for all kinds (including reflect.Interface).
func rvSetZero(rv reflect.Value) {
	rvSetDirectZero(rv)
}

// rvSetIntf sets an interface-kind rv to v, delegating to reflect.Value.Set
// (interface assignment may allocate, so no unsafe shortcut is taken).
func rvSetIntf(rv reflect.Value, v reflect.Value) {
	rv.Set(v)
}

// rvSetDirect is rv.Set for all kinds except reflect.Interface.
//
// Callers MUST not pass a value of kind reflect.Interface, as it may cause unexpected segfaults.
func rvSetDirect(rv reflect.Value, v reflect.Value) {
	// MARKER: rv.Set for kind reflect.Interface may do a separate allocation if a scalar value.
	// The book-keeping is onerous, so we just do the simple ones where a memmove is sufficient.
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if uv.flag&unsafeFlagIndir == 0 {
		// v is a direct reference (map/chan/func/ptr): store the word itself
		*(*unsafe.Pointer)(urv.ptr) = uv.ptr
	} else if uv.ptr == unsafeZeroAddr {
		// v aliases the shared zero block: clear the destination instead of copying
		if urv.ptr != unsafeZeroAddr {
			typedmemclr(urv.typ, urv.ptr)
		}
	} else {
		typedmemmove(urv.typ, urv.ptr, uv.ptr)
	}
}

// rvSetDirectZero is rv.Set(reflect.Zero(rv.Type()) for all kinds except reflect.Interface.
func rvSetDirectZero(rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if urv.ptr != unsafeZeroAddr {
		typedmemclr(urv.typ, urv.ptr)
	}
}
   762  
// rvMakeSlice updates the slice to point to a new array.
// It copies data from old slice to new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Value, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	s := unsafeSlice{newarray(t, xcap), xlen, xcap}
	if ux.Len > 0 {
		// preserve existing elements in the new backing array
		typedslicecopy(t, s, *ux)
	}
	*ux = s
	// in this unsafe variant, the slice header is always updated in place
	return rv, true
}

// rvSlice returns a sub-slice of the slice given new length,
// without modifying passed in value.
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	var x []struct{}
	// copy the header into fresh storage, then shrink its length,
	// leaving the caller's original header untouched
	ux := (*unsafeSlice)(unsafe.Pointer(&x))
	*ux = *(*unsafeSlice)(urv.ptr)
	ux.Len = length
	urv.ptr = unsafe.Pointer(ux)
	return rv
}

// rvGrowSlice updates the slice to point to a new array with the cap incremented, and len set to the new cap value.
// It copies data from old slice to new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	*ux = unsafeGrowslice(t, *ux, cap, incr)
	ux.Len = ux.Cap
	return rv, ux.Cap, true
}
   802  
   803  // ------------
   804  
// rvSliceIndex returns an addressable reflect.Value for element i of the slice rv,
// computed directly from the slice's data pointer and ti's element size
// (bypassing reflect.Value.Index and its bounds/flag checks).
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	// element address = slice data pointer + i*elemsize
	uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	// mark as an indirect, addressable value of the element's kind
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}
   813  
// rvSliceZeroCap returns a reflect.Value for an empty (len=0, cap=0) slice of type t,
// sharing the package-level unsafeZeroSlice header so no allocation occurs.
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(reflect.Slice) | unsafeFlagIndir
	urv.ptr = unsafe.Pointer(&unsafeZeroSlice)
	return
}
   821  
   822  func rvLenSlice(rv reflect.Value) int {
   823  	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   824  	return (*unsafeSlice)(urv.ptr).Len
   825  }
   826  
   827  func rvCapSlice(rv reflect.Value) int {
   828  	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   829  	return (*unsafeSlice)(urv.ptr).Cap
   830  }
   831  
// rvArrayIndex returns an addressable reflect.Value for element i of the array rv.
// Unlike rvSliceIndex, the array data starts directly at urv.ptr (no slice header).
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	// element address = array base + i*elemsize
	uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}
   840  
// rvGetArrayBytes returns the contents of the byte array in rv as a []byte,
// aliasing the array's memory directly (no copy).
//
// if scratch is nil, then return a writable view (assuming canAddr=true)
// NOTE(review): scratch is unused in this unsafe variant — the returned
// slice is always a direct view; confirm against the safe variant.
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	bx := (*unsafeSlice)(unsafe.Pointer(&bs))
	bx.Data = urv.ptr
	bx.Len = rv.Len()
	bx.Cap = bx.Len
	return
}
   850  
// rvGetArray4Slice returns an addressable reflect.Value of array type whose
// length equals the slice's length, aliasing the slice's backing memory.
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
	// It is possible that this slice is based off an array with a larger
	// len that we want (where array len == slice cap).
	// However, it is ok to create an array type that is a subset of the full
	// e.g. full slice is based off a *[16]byte, but we can create a *[4]byte
	// off of it. That is ok.
	//
	// Consequently, we use rvLenSlice, not rvCapSlice.

	t := reflectArrayOf(rvLenSlice(rv), rvType(rv).Elem())
	// v = rvZeroAddrK(t, reflect.Array)

	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr

	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.

	return
}
   872  
// rvGetSlice4Array populates the slice pointed to by v (passed as an interface
// wrapping a *[]T) with a header aliasing the array held in rv,
// with len == cap == the array's length. No data is copied.
func rvGetSlice4Array(rv reflect.Value, v interface{}) {
	// v is a pointer to a slice to be populated
	uv := (*unsafeIntf)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))

	s := (*unsafeSlice)(uv.ptr)
	s.Data = urv.ptr
	s.Len = rv.Len()
	s.Cap = s.Len
}
   883  
// rvCopySlice copies elements from the src slice into the dest slice,
// delegating to reflect.typedslicecopy so GC write barriers are honored.
func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
	typedslicecopy((*unsafeIntf)(unsafe.Pointer(&elemType)).ptr,
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&dest)).ptr),
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&src)).ptr))
}
   889  
   890  // ------------
   891  
   892  func rvGetBool(rv reflect.Value) bool {
   893  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   894  	return *(*bool)(v.ptr)
   895  }
   896  
   897  func rvGetBytes(rv reflect.Value) []byte {
   898  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   899  	return *(*[]byte)(v.ptr)
   900  }
   901  
   902  func rvGetTime(rv reflect.Value) time.Time {
   903  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   904  	return *(*time.Time)(v.ptr)
   905  }
   906  
   907  func rvGetString(rv reflect.Value) string {
   908  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   909  	return *(*string)(v.ptr)
   910  }
   911  
   912  func rvGetFloat64(rv reflect.Value) float64 {
   913  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   914  	return *(*float64)(v.ptr)
   915  }
   916  
   917  func rvGetFloat32(rv reflect.Value) float32 {
   918  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   919  	return *(*float32)(v.ptr)
   920  }
   921  
   922  func rvGetComplex64(rv reflect.Value) complex64 {
   923  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   924  	return *(*complex64)(v.ptr)
   925  }
   926  
   927  func rvGetComplex128(rv reflect.Value) complex128 {
   928  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   929  	return *(*complex128)(v.ptr)
   930  }
   931  
   932  func rvGetInt(rv reflect.Value) int {
   933  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   934  	return *(*int)(v.ptr)
   935  }
   936  
   937  func rvGetInt8(rv reflect.Value) int8 {
   938  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   939  	return *(*int8)(v.ptr)
   940  }
   941  
   942  func rvGetInt16(rv reflect.Value) int16 {
   943  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   944  	return *(*int16)(v.ptr)
   945  }
   946  
   947  func rvGetInt32(rv reflect.Value) int32 {
   948  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   949  	return *(*int32)(v.ptr)
   950  }
   951  
   952  func rvGetInt64(rv reflect.Value) int64 {
   953  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   954  	return *(*int64)(v.ptr)
   955  }
   956  
   957  func rvGetUint(rv reflect.Value) uint {
   958  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   959  	return *(*uint)(v.ptr)
   960  }
   961  
   962  func rvGetUint8(rv reflect.Value) uint8 {
   963  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   964  	return *(*uint8)(v.ptr)
   965  }
   966  
   967  func rvGetUint16(rv reflect.Value) uint16 {
   968  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   969  	return *(*uint16)(v.ptr)
   970  }
   971  
   972  func rvGetUint32(rv reflect.Value) uint32 {
   973  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   974  	return *(*uint32)(v.ptr)
   975  }
   976  
   977  func rvGetUint64(rv reflect.Value) uint64 {
   978  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   979  	return *(*uint64)(v.ptr)
   980  }
   981  
   982  func rvGetUintptr(rv reflect.Value) uintptr {
   983  	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   984  	return *(*uintptr)(v.ptr)
   985  }
   986  
// rvLenMap returns the number of entries in the map held in rv,
// reading the count word at the head of the runtime map header.
func rvLenMap(rv reflect.Value) int {
	// maplen is not inlined, because as of go1.16beta, go:linkname's are not inlined.
	// thus, faster to call rv.Len() directly.
	//
	// MARKER: review after https://github.com/golang/go/issues/20019 fixed.

	// return rv.Len()

	return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}
   997  
   998  // Note: it is hard to find len(...) of an array type,
   999  // as that is a field in the arrayType representing the array, and hard to introspect.
  1000  //
  1001  // func rvLenArray(rv reflect.Value) int {	return rv.Len() }
  1002  
  1003  // ------------ map range and map indexing ----------
  1004  
  1005  // regular calls to map via reflection: MapKeys, MapIndex, MapRange/MapIter etc
  1006  // will always allocate for each map key or value.
  1007  //
  1008  // It is more performant to provide a value that the map entry is set into,
  1009  // and that elides the allocation.
  1010  
  1011  // go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
  1012  // hIter struct with the first 2 values being key and value
  1013  // of the current iteration.
  1014  //
  1015  // This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
  1016  // We bypass the reflect wrapper functions and just use the *hIter directly.
  1017  //
  1018  // Though *hIter has many fields, we only care about the first 2.
  1019  //
  1020  // We directly embed this in unsafeMapIter below
  1021  //
  1022  // hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accommodate small growth.
  1024  
// unsafeMapIter holds the state for iterating a map via the runtime's hiter,
// along with reusable key/value reflect.Values that each entry is copied
// (or pointed) into on Next.
type unsafeMapIter struct {
	mtyp, mptr unsafe.Pointer // runtime type descriptor and pointer of the map
	k, v       reflect.Value  // reusable destinations for the current key/value
	kisref     bool           // key kind is in refBitset (a reference kind)
	visref     bool           // value kind is in refBitset (a reference kind)
	mapvalues  bool           // whether values are materialized (not just keys)
	done       bool           // iteration exhausted (or map was nil)
	started    bool           // mapiterinit has positioned us at the first entry
	_          [3]byte // padding
	// it mirrors the runtime hiter: only key and value (the first 2 fields) are read here.
	it         struct {
		key   unsafe.Pointer
		value unsafe.Pointer
		_     [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
	}
}
  1040  
// Next advances the iterator and reports whether another entry exists.
// On success, the current key (and value, if mapvalues) is available via t.k (and t.v).
func (t *unsafeMapIter) Next() (r bool) {
	if t == nil || t.done {
		return
	}
	// mapiterinit (called from mapRange) already positioned the iterator at the
	// first entry, so only advance on calls after the first.
	if t.started {
		mapiternext((unsafe.Pointer)(&t.it))
	} else {
		t.started = true
	}

	// the runtime signals exhaustion with a nil key pointer
	t.done = t.it.key == nil
	if t.done {
		return
	}

	// For reference kinds (or when direct assignment is permitted), point t.k
	// straight at the map's internal key storage; otherwise copy the key out
	// via typedmemmove (which honors write barriers).
	if helperUnsafeDirectAssignMapEntry || t.kisref {
		(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
	} else {
		k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
		typedmemmove(k.typ, k.ptr, t.it.key)
	}

	// same treatment for the value, when values were requested
	if t.mapvalues {
		if helperUnsafeDirectAssignMapEntry || t.visref {
			(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
		} else {
			v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
			typedmemmove(v.typ, v.ptr, t.it.value)
		}
	}

	return true
}
  1074  
  1075  func (t *unsafeMapIter) Key() (r reflect.Value) {
  1076  	return t.k
  1077  }
  1078  
  1079  func (t *unsafeMapIter) Value() (r reflect.Value) {
  1080  	return t.v
  1081  }
  1082  
  1083  func (t *unsafeMapIter) Done() {}
  1084  
// mapIter is the exported-to-package map iterator; it simply embeds
// unsafeMapIter (see the safe variant for the reflect-based equivalent).
type mapIter struct {
	unsafeMapIter
}
  1088  
// mapRange initializes t for iterating map m, depositing keys into k
// (and values into v, when mapvalues=true) on each Next call.
// k and v should be addressable values, as produced by mapAddrLoopvarRV.
func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
	// a nil map has nothing to iterate: mark done immediately
	if rvIsNil(m) {
		t.done = true
		return
	}
	t.done = false
	t.started = false
	t.mapvalues = mapvalues

	// var urv *unsafeReflectValue

	urv := (*unsafeReflectValue)(unsafe.Pointer(&m))
	t.mtyp = urv.typ
	t.mptr = rvRefPtr(urv)

	// position the runtime iterator at the first entry
	// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
	mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))

	t.k = k
	t.kisref = refBitset.isset(byte(k.Kind()))

	if mapvalues {
		t.v = v
		t.visref = refBitset.isset(byte(v.Kind()))
	} else {
		t.v = reflect.Value{}
	}
}
  1117  
  1118  // unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
  1119  // It is needed as maps always keep a reference to the underlying value.
  1120  func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
  1121  	if urv.flag&unsafeFlagIndir == 0 {
  1122  		return unsafe.Pointer(&urv.ptr)
  1123  	}
  1124  	return urv.ptr
  1125  }
  1126  
  1127  // func mapDelete(m, k reflect.Value) {
  1128  // 	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
  1129  // 	var kptr = unsafeMapKVPtr(urv)
  1130  // 	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
  1131  // 	mapdelete(urv.typ, rv2ptr(urv), kptr)
  1132  // }
  1133  
// return an addressable reflect value that can be used in mapRange and mapGet operations.
//
// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
//
// t is the reflect.Type of the value and k its kind; the returned value is
// marked indirect+addressable so map entries can be copied into it.
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	// return rvZeroAddrK(t, k)
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	// since we always set the ptr when helperUnsafeDirectAssignMapEntry=true,
	// we should only allocate if it is not true
	if !helperUnsafeDirectAssignMapEntry {
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
  1149  
  1150  // ---------- ENCODER optimized ---------------
  1151  
// jsondriver reinterprets e.e's interface data pointer as a *jsonEncDriver.
// Only meaningful when the encoder's driver actually is json — caller must ensure.
func (e *Encoder) jsondriver() *jsonEncDriver {
	return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
}
  1155  
// zerocopystate reports whether the decoder may hand out zero-copy views
// into its input: the last byte read left us in zero-copy state AND the
// handle permits ZeroCopy.
func (d *Decoder) zerocopystate() bool {
	return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
}
  1159  
  1160  func (d *Decoder) stringZC(v []byte) (s string) {
  1161  	if d.zerocopystate() {
  1162  		return stringView(v)
  1163  	}
  1164  	return d.string(v)
  1165  }
  1166  
// mapKeyString returns a string view over *kstr2bs for use as a map key.
// When not in zero-copy mode and the key bytes live in the decoder's reusable
// buffer, they are first copied into *kstrbs (so subsequent reads cannot
// clobber the key), and *callFnRvk is set — presumably so the caller knows to
// finalize/intern the key; verify against callers.
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
	if !d.zerocopystate() {
		*callFnRvk = true
		if d.decByteState == decByteStateReuseBuf {
			*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
			*kstr2bs = *kstrbs
		}
	}
	return stringView(*kstr2bs)
}
  1177  
  1178  // ---------- DECODER optimized ---------------
  1179  
// checkBreak reports whether the stream is at a break/end marker,
// delegating straight to the driver's CheckBreak.
func (d *Decoder) checkBreak() bool {
	// MARKER: jsonDecDriver.CheckBreak() costs over 80, and this isn't inlined.
	// Consequently, there's no benefit in incurring the cost of this
	// wrapping function checkBreak.
	//
	// It is faster to just call the interface method directly.

	// if d.js {
	// 	return d.jsondriver().CheckBreak()
	// }
	// if d.cbor {
	// 	return d.cbordriver().CheckBreak()
	// }
	return d.d.CheckBreak()
}
  1195  
// jsondriver reinterprets d.d's interface data pointer as a *jsonDecDriver.
// Only meaningful when the decoder's driver actually is json — caller must ensure.
func (d *Decoder) jsondriver() *jsonDecDriver {
	return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
}
  1199  
  1200  // ---------- structFieldInfo optimized ---------------
  1201  
// rvField returns the field described by n within struct value v,
// computed directly from the field's offset (bypassing reflect.Value.Field).
func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
	// we already know this is exported, and maybe embedded (based on what si says)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// clear flagEmbedRO if necessary, and inherit permission bits from v
	urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
	// field address = struct base + field offset
	urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
	return
}
  1212  
  1213  // runtime chan and map are designed such that the first field is the count.
  1214  // len builtin uses this to get the length of a chan/map easily.
  1215  // leverage this knowledge, since maplen and chanlen functions from runtime package
  1216  // are go:linkname'd here, and thus not inlined as of go1.16beta
  1217  
  1218  func len_map_chan(m unsafe.Pointer) int {
  1219  	if m == nil {
  1220  		return 0
  1221  	}
  1222  	return *((*int)(m))
  1223  }
  1224  
// len_map returns the entry count of the map whose runtime header m points to.
func len_map(m unsafe.Pointer) int {
	// return maplen(m)
	return len_map_chan(m)
}
// len_chan returns the element count of the chan whose runtime header m points to.
func len_chan(m unsafe.Pointer) int {
	// return chanlen(m)
	return len_map_chan(m)
}
  1233  
// unsafeNew allocates a zeroed value of the given runtime type and returns a
// pointer to it — equivalent to reflect.unsafe_New, but via runtime.mallocgc
// since reflect.unsafe_New is unavailable under gollvm (see notes below).
// rtsize2 presumably yields the type's size — defined elsewhere in this package.
func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
	return mallocgc(rtsize2(typ), typ, true)
}
  1237  
  1238  // ---------- go linknames (LINKED to runtime/reflect) ---------------
  1239  
  1240  // MARKER: always check that these linknames match subsequent versions of go
  1241  //
  1242  // Note that as of Jan 2021 (go 1.16 release), go:linkname(s) are not inlined
  1243  // outside of the standard library use (e.g. within sync, reflect, etc).
  1244  // If these link'ed functions were normally inlined, calling them here would
  1245  // not necessarily give a performance boost, due to function overhead.
  1246  //
  1247  // However, it seems most of these functions are not inlined anyway,
  1248  // as only maplen, chanlen and mapaccess are small enough to get inlined.
  1249  //
  1250  //   We checked this by going into $GOROOT/src/runtime and running:
  1251  //   $ go build -tags codec.notfastpath -gcflags "-m=2"
  1252  
  1253  // reflect.{unsafe_New, unsafe_NewArray} are not supported in gollvm,
  1254  // failing with "error: undefined reference" error.
  1255  // however, runtime.{mallocgc, newarray} are supported, so use that instead.
  1256  
// mallocgc allocates size bytes for an object of the given runtime type,
// zeroing the memory when needzero is true. Linked to the runtime allocator.
//
//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer
  1260  
// newarray allocates a zeroed array of n elements of the given runtime type.
//
//go:linkname newarray runtime.newarray
//go:noescape
func newarray(typ unsafe.Pointer, n int) unsafe.Pointer
  1264  
// mapiterinit initializes the runtime map iterator 'it' over map m
// of the given runtime type, positioning it at the first entry.
//
//go:linkname mapiterinit runtime.mapiterinit
//go:noescape
func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)
  1268  
// mapiternext advances the runtime map iterator 'it' to the next entry.
//
//go:linkname mapiternext runtime.mapiternext
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)
  1272  
// mapdelete removes the entry with the given key from map m of the given runtime type.
//
//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)
  1276  
// mapassign returns a pointer to the value slot for key in map m,
// inserting the key if it is absent.
//
//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
  1280  
// mapaccess2 returns a pointer to the value for key in map m,
// and whether the key exists.
//
//go:linkname mapaccess2 runtime.mapaccess2
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
  1284  
  1285  // reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
  1286  // and if a writeBarrier is needed, before delegating to the right method in the runtime.
  1287  //
  1288  // This is why we use the functions in reflect, and not the ones in runtime directly.
  1289  // Calling runtime.XXX here will lead to memory issues.
  1290  
// typedslicecopy copies src into dst (slices with elements of elemType),
// handling write barriers; it returns the number of elements copied.
//
//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src unsafeSlice) int
  1294  
// typedmemmove copies a value of type typ from src to dst, handling write barriers.
//
//go:linkname typedmemmove reflect.typedmemmove
//go:noescape
func typedmemmove(typ unsafe.Pointer, dst, src unsafe.Pointer)
  1298  
// typedmemclr zeroes the value of type typ at dst, handling write barriers.
//
//go:linkname typedmemclr reflect.typedmemclr
//go:noescape
func typedmemclr(typ unsafe.Pointer, dst unsafe.Pointer)