github.com/sbinet/go@v0.0.0-20160827155028-54d7de7dd62b/src/reflect/type.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package reflect implements run-time reflection, allowing a program to
     6  // manipulate objects with arbitrary types. The typical use is to take a value
     7  // with static type interface{} and extract its dynamic type information by
     8  // calling TypeOf, which returns a Type.
     9  //
    10  // A call to ValueOf returns a Value representing the run-time data.
    11  // Zero takes a Type and returns a Value representing a zero value
    12  // for that type.
    13  //
    14  // See "The Laws of Reflection" for an introduction to reflection in Go:
    15  // https://golang.org/doc/articles/laws_of_reflection.html
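//
// For example, a minimal use of TypeOf and ValueOf might look like this
// (an illustrative sketch):
//
//	var x float64 = 3.4
//	t := reflect.TypeOf(x)  // t.Kind() == reflect.Float64
//	v := reflect.ValueOf(x) // v.Float() == 3.4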
    16  package reflect
    17  
    18  import (
    19  	"runtime"
    20  	"strconv"
    21  	"sync"
    22  	"unsafe"
    23  )
    24  
    25  // Type is the representation of a Go type.
    26  //
    27  // Not all methods apply to all kinds of types. Restrictions,
    28  // if any, are noted in the documentation for each method.
    29  // Use the Kind method to find out the kind of type before
    30  // calling kind-specific methods. Calling a method
    31  // inappropriate to the kind of type causes a run-time panic.
    32  //
    33  // You can use == with reflect.Type values to check whether two types
    34  // are the same.  If T1 and T2 are Go types, and v1 and v2 are values of
    35  // those types respectively, then reflect.TypeOf(v1) == reflect.TypeOf(v2)
    36  // if and only if both (interface{})(v2).(T1) and (interface{})(v1).(T2)
    37  // succeed.
    38  type Type interface {
    39  	// Methods applicable to all types.
    40  
    41  	// Align returns the alignment in bytes of a value of
    42  	// this type when allocated in memory.
    43  	Align() int
    44  
    45  	// FieldAlign returns the alignment in bytes of a value of
    46  	// this type when used as a field in a struct.
    47  	FieldAlign() int
    48  
    49  	// Method returns the i'th method in the type's method set.
    50  	// It panics if i is not in the range [0, NumMethod()).
    51  	//
    52  	// For a non-interface type T or *T, the returned Method's Type and Func
    53  	// fields describe a function whose first argument is the receiver.
    54  	//
    55  	// For an interface type, the returned Method's Type field gives the
    56  	// method signature, without a receiver, and the Func field is nil.
    57  	Method(int) Method
    58  
    59  	// MethodByName returns the method with that name in the type's
    60  	// method set and a boolean indicating if the method was found.
    61  	//
    62  	// For a non-interface type T or *T, the returned Method's Type and Func
    63  	// fields describe a function whose first argument is the receiver.
    64  	//
    65  	// For an interface type, the returned Method's Type field gives the
    66  	// method signature, without a receiver, and the Func field is nil.
    67  	MethodByName(string) (Method, bool)
    68  
    69  	// NumMethod returns the number of methods in the type's method set.
    70  	NumMethod() int
    71  
    72  	// Name returns the type's name within its package.
    73  	// It returns an empty string for unnamed types.
    74  	Name() string
    75  
    76  	// PkgPath returns a named type's package path, that is, the import path
    77  	// that uniquely identifies the package, such as "encoding/base64".
    78  	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
    79  	// the package path will be the empty string.
    80  	PkgPath() string
    81  
    82  	// Size returns the number of bytes needed to store
    83  	// a value of the given type; it is analogous to unsafe.Sizeof.
    84  	Size() uintptr
    85  
    86  	// String returns a string representation of the type.
    87  	// The string representation may use shortened package names
    88  	// (e.g., base64 instead of "encoding/base64") and is not
    89  	// guaranteed to be unique among types. To test for equality,
    90  	// compare the Types directly.
    91  	String() string
    92  
    93  	// Kind returns the specific kind of this type.
    94  	Kind() Kind
    95  
    96  	// Implements reports whether the type implements the interface type u.
    97  	Implements(u Type) bool
    98  
    99  	// AssignableTo reports whether a value of the type is assignable to type u.
   100  	AssignableTo(u Type) bool
   101  
   102  	// ConvertibleTo reports whether a value of the type is convertible to type u.
   103  	ConvertibleTo(u Type) bool
   104  
   105  	// Comparable reports whether values of this type are comparable.
   106  	Comparable() bool
   107  
   108  	// Methods applicable only to some types, depending on Kind.
   109  	// The methods allowed for each kind are:
   110  	//
   111  	//	Int*, Uint*, Float*, Complex*: Bits
   112  	//	Array: Elem, Len
   113  	//	Chan: ChanDir, Elem
    114  	//	Func: In, NumIn, Out, NumOut, IsVariadic
   115  	//	Map: Key, Elem
   116  	//	Ptr: Elem
   117  	//	Slice: Elem
   118  	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
   119  
   120  	// Bits returns the size of the type in bits.
   121  	// It panics if the type's Kind is not one of the
   122  	// sized or unsized Int, Uint, Float, or Complex kinds.
   123  	Bits() int
   124  
   125  	// ChanDir returns a channel type's direction.
   126  	// It panics if the type's Kind is not Chan.
   127  	ChanDir() ChanDir
   128  
   129  	// IsVariadic reports whether a function type's final input parameter
   130  	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
   131  	// implicit actual type []T.
   132  	//
   133  	// For concreteness, if t represents func(x int, y ... float64), then
   134  	//
   135  	//	t.NumIn() == 2
   136  	//	t.In(0) is the reflect.Type for "int"
   137  	//	t.In(1) is the reflect.Type for "[]float64"
   138  	//	t.IsVariadic() == true
   139  	//
   140  	// IsVariadic panics if the type's Kind is not Func.
   141  	IsVariadic() bool
   142  
   143  	// Elem returns a type's element type.
   144  	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
   145  	Elem() Type
   146  
   147  	// Field returns a struct type's i'th field.
   148  	// It panics if the type's Kind is not Struct.
   149  	// It panics if i is not in the range [0, NumField()).
   150  	Field(i int) StructField
   151  
   152  	// FieldByIndex returns the nested field corresponding
   153  	// to the index sequence. It is equivalent to calling Field
   154  	// successively for each index i.
   155  	// It panics if the type's Kind is not Struct.
   156  	FieldByIndex(index []int) StructField
   157  
   158  	// FieldByName returns the struct field with the given name
   159  	// and a boolean indicating if the field was found.
   160  	FieldByName(name string) (StructField, bool)
   161  
   162  	// FieldByNameFunc returns the first struct field with a name
   163  	// that satisfies the match function and a boolean indicating if
   164  	// the field was found.
   165  	FieldByNameFunc(match func(string) bool) (StructField, bool)
   166  
   167  	// In returns the type of a function type's i'th input parameter.
   168  	// It panics if the type's Kind is not Func.
   169  	// It panics if i is not in the range [0, NumIn()).
   170  	In(i int) Type
   171  
   172  	// Key returns a map type's key type.
   173  	// It panics if the type's Kind is not Map.
   174  	Key() Type
   175  
   176  	// Len returns an array type's length.
   177  	// It panics if the type's Kind is not Array.
   178  	Len() int
   179  
   180  	// NumField returns a struct type's field count.
   181  	// It panics if the type's Kind is not Struct.
   182  	NumField() int
   183  
   184  	// NumIn returns a function type's input parameter count.
   185  	// It panics if the type's Kind is not Func.
   186  	NumIn() int
   187  
   188  	// NumOut returns a function type's output parameter count.
   189  	// It panics if the type's Kind is not Func.
   190  	NumOut() int
   191  
   192  	// Out returns the type of a function type's i'th output parameter.
   193  	// It panics if the type's Kind is not Func.
   194  	// It panics if i is not in the range [0, NumOut()).
   195  	Out(i int) Type
   196  
   197  	common() *rtype
   198  	uncommon() *uncommonType
   199  }
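
// As an illustrative sketch of the kind-specific methods listed above, for
// t := reflect.TypeOf(map[string][]int(nil)):
//
//	t.Kind()        // reflect.Map
//	t.Key().Kind()  // reflect.String
//	t.Elem().Kind() // reflect.Slice
//	t.Elem().Elem() // the Type for int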
   200  
   201  // BUG(rsc): FieldByName and related functions consider struct field names to be equal
   202  // if the names are equal, even if they are unexported names originating
   203  // in different packages. The practical effect of this is that the result of
   204  // t.FieldByName("x") is not well defined if the struct type t contains
   205  // multiple fields named x (embedded from different packages).
   206  // FieldByName may return one of the fields named x or may report that there are none.
   207  // See golang.org/issue/4876 for more details.
   208  
   209  /*
   210   * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
    211   * A few are also known to ../runtime/type.go, to convey to debuggers.
   213   */
   214  
   215  // A Kind represents the specific kind of type that a Type represents.
   216  // The zero Kind is not a valid kind.
   217  type Kind uint
   218  
   219  const (
   220  	Invalid Kind = iota
   221  	Bool
   222  	Int
   223  	Int8
   224  	Int16
   225  	Int32
   226  	Int64
   227  	Uint
   228  	Uint8
   229  	Uint16
   230  	Uint32
   231  	Uint64
   232  	Uintptr
   233  	Float32
   234  	Float64
   235  	Complex64
   236  	Complex128
   237  	Array
   238  	Chan
   239  	Func
   240  	Interface
   241  	Map
   242  	Ptr
   243  	Slice
   244  	String
   245  	Struct
   246  	UnsafePointer
   247  )
   248  
   249  // tflag is used by an rtype to signal what extra type information is
   250  // available in the memory directly following the rtype value.
   251  //
   252  // tflag values must be kept in sync with copies in:
   253  //	cmd/compile/internal/gc/reflect.go
   254  //	cmd/link/internal/ld/decodesym.go
   255  //	runtime/type.go
   256  type tflag uint8
   257  
   258  const (
   259  	// tflagUncommon means that there is a pointer, *uncommonType,
   260  	// just beyond the outer type structure.
   261  	//
   262  	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
   263  	// then t has uncommonType data and it can be accessed as:
   264  	//
   265  	//	type tUncommon struct {
   266  	//		structType
   267  	//		u uncommonType
   268  	//	}
   269  	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
   270  	tflagUncommon tflag = 1 << 0
   271  
   272  	// tflagExtraStar means the name in the str field has an
   273  	// extraneous '*' prefix. This is because for most types T in
   274  	// a program, the type *T also exists and reusing the str data
   275  	// saves binary size.
   276  	tflagExtraStar tflag = 1 << 1
   277  
   278  	// tflagNamed means the type has a name.
   279  	tflagNamed tflag = 1 << 2
   280  )
   281  
   282  // rtype is the common implementation of most values.
   283  // It is embedded in other, public struct types, but always
   284  // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
   285  // so that code cannot convert from, say, *arrayType to *ptrType.
   286  type rtype struct {
   287  	size       uintptr
   288  	ptrdata    uintptr
   289  	hash       uint32   // hash of type; avoids computation in hash tables
   290  	tflag      tflag    // extra type information flags
   291  	align      uint8    // alignment of variable with this type
   292  	fieldAlign uint8    // alignment of struct field with this type
   293  	kind       uint8    // enumeration for C
   294  	alg        *typeAlg // algorithm table
   295  	gcdata     *byte    // garbage collection data
   296  	str        nameOff  // string form
   297  	ptrToThis  typeOff  // type for pointer to this type, may be zero
   298  }
   299  
   300  // a copy of runtime.typeAlg
   301  type typeAlg struct {
   302  	// function for hashing objects of this type
   303  	// (ptr to object, seed) -> hash
   304  	hash func(unsafe.Pointer, uintptr) uintptr
   305  	// function for comparing objects of this type
   306  	// (ptr to object A, ptr to object B) -> ==?
   307  	equal func(unsafe.Pointer, unsafe.Pointer) bool
   308  }
   309  
   310  // Method on non-interface type
   311  type method struct {
   312  	name nameOff // name of method
   313  	mtyp typeOff // method type (without receiver)
   314  	ifn  textOff // fn used in interface call (one-word receiver)
   315  	tfn  textOff // fn used for normal method call
   316  }
   317  
   318  // uncommonType is present only for types with names or methods
   319  // (if T is a named type, the uncommonTypes for T and *T have methods).
   320  // Using a pointer to this struct reduces the overall size required
   321  // to describe an unnamed type with no methods.
   322  type uncommonType struct {
   323  	pkgPath nameOff // import path; empty for built-in types like int, string
   324  	mcount  uint16  // number of methods
   325  	_       uint16  // unused
   326  	moff    uint32  // offset from this uncommontype to [mcount]method
   327  	_       uint32  // unused
   328  }
   329  
   330  // ChanDir represents a channel type's direction.
   331  type ChanDir int
   332  
   333  const (
   334  	RecvDir ChanDir             = 1 << iota // <-chan
   335  	SendDir                                 // chan<-
   336  	BothDir = RecvDir | SendDir             // chan
   337  )
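
// For example (an illustrative sketch), for t := reflect.TypeOf((chan<- int)(nil)):
//
//	t.ChanDir() // reflect.SendDir
//	t.Elem()    // the Type for int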
   338  
   339  // arrayType represents a fixed array type.
   340  type arrayType struct {
   341  	rtype `reflect:"array"`
   342  	elem  *rtype // array element type
   343  	slice *rtype // slice type
   344  	len   uintptr
   345  }
   346  
   347  // chanType represents a channel type.
   348  type chanType struct {
   349  	rtype `reflect:"chan"`
   350  	elem  *rtype  // channel element type
   351  	dir   uintptr // channel direction (ChanDir)
   352  }
   353  
   354  // funcType represents a function type.
   355  //
   356  // A *rtype for each in and out parameter is stored in an array that
   357  // directly follows the funcType (and possibly its uncommonType). So
   358  // a function type with one method, one input, and one output is:
   359  //
   360  //	struct {
   361  //		funcType
   362  //		uncommonType
   363  //		[2]*rtype    // [0] is in, [1] is out
   364  //	}
   365  type funcType struct {
   366  	rtype    `reflect:"func"`
   367  	inCount  uint16
   368  	outCount uint16 // top bit is set if last input parameter is ...
   369  }
   370  
   371  // imethod represents a method on an interface type
   372  type imethod struct {
   373  	name nameOff // name of method
   374  	typ  typeOff // .(*FuncType) underneath
   375  }
   376  
   377  // interfaceType represents an interface type.
   378  type interfaceType struct {
   379  	rtype   `reflect:"interface"`
   380  	pkgPath name      // import path
   381  	methods []imethod // sorted by hash
   382  }
   383  
   384  // mapType represents a map type.
   385  type mapType struct {
   386  	rtype         `reflect:"map"`
   387  	key           *rtype // map key type
   388  	elem          *rtype // map element (value) type
   389  	bucket        *rtype // internal bucket structure
   390  	hmap          *rtype // internal map header
   391  	keysize       uint8  // size of key slot
   392  	indirectkey   uint8  // store ptr to key instead of key itself
   393  	valuesize     uint8  // size of value slot
   394  	indirectvalue uint8  // store ptr to value instead of value itself
   395  	bucketsize    uint16 // size of bucket
   396  	reflexivekey  bool   // true if k==k for all keys
   397  	needkeyupdate bool   // true if we need to update key on an overwrite
   398  }
   399  
   400  // ptrType represents a pointer type.
   401  type ptrType struct {
   402  	rtype `reflect:"ptr"`
   403  	elem  *rtype // pointer element (pointed at) type
   404  }
   405  
   406  // sliceType represents a slice type.
   407  type sliceType struct {
   408  	rtype `reflect:"slice"`
   409  	elem  *rtype // slice element type
   410  }
   411  
   412  // Struct field
   413  type structField struct {
   414  	name   name    // name is empty for embedded fields
   415  	typ    *rtype  // type of field
   416  	offset uintptr // byte offset of field within struct
   417  }
   418  
   419  // structType represents a struct type.
   420  type structType struct {
   421  	rtype   `reflect:"struct"`
   422  	pkgPath name
   423  	fields  []structField // sorted by offset
   424  }
   425  
   426  // name is an encoded type name with optional extra data.
   427  //
   428  // The first byte is a bit field containing:
   429  //
   430  //	1<<0 the name is exported
   431  //	1<<1 tag data follows the name
   432  //	1<<2 pkgPath nameOff follows the name and tag
   433  //
   434  // The next two bytes are the data length:
   435  //
   436  //	 l := uint16(data[1])<<8 | uint16(data[2])
   437  //
   438  // Bytes [3:3+l] are the string data.
   439  //
   440  // If tag data follows then bytes 3+l and 3+l+1 are the tag length,
   441  // with the data following.
   442  //
   443  // If the import path follows, then 4 bytes at the end of
   444  // the data form a nameOff. The import path is only set for concrete
   445  // methods that are defined in a different package than their type.
   446  //
   447  // If a name starts with "*", then the exported bit represents
   448  // whether the pointed to type is exported.
   449  type name struct {
   450  	bytes *byte
   451  }
   452  
   453  func (n name) data(off int) *byte {
   454  	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
   455  }
   456  
   457  func (n name) isExported() bool {
   458  	return (*n.bytes)&(1<<0) != 0
   459  }
   460  
   461  func (n name) nameLen() int {
   462  	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
   463  }
   464  
   465  func (n name) tagLen() int {
   466  	if *n.data(0)&(1<<1) == 0 {
   467  		return 0
   468  	}
   469  	off := 3 + n.nameLen()
   470  	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
   471  }
   472  
   473  func (n name) name() (s string) {
   474  	if n.bytes == nil {
   475  		return
   476  	}
   477  	b := (*[4]byte)(unsafe.Pointer(n.bytes))
   478  
   479  	hdr := (*stringHeader)(unsafe.Pointer(&s))
   480  	hdr.Data = unsafe.Pointer(&b[3])
   481  	hdr.Len = int(b[1])<<8 | int(b[2])
   482  	return s
   483  }
   484  
   485  func (n name) tag() (s string) {
   486  	tl := n.tagLen()
   487  	if tl == 0 {
   488  		return ""
   489  	}
   490  	nl := n.nameLen()
   491  	hdr := (*stringHeader)(unsafe.Pointer(&s))
   492  	hdr.Data = unsafe.Pointer(n.data(3 + nl + 2))
   493  	hdr.Len = tl
   494  	return s
   495  }
   496  
   497  func (n name) pkgPath() string {
   498  	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
   499  		return ""
   500  	}
   501  	off := 3 + n.nameLen()
   502  	if tl := n.tagLen(); tl > 0 {
   503  		off += 2 + tl
   504  	}
   505  	var nameOff int32
   506  	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
   507  	pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
   508  	return pkgPathName.name()
   509  }
   510  
   511  // round n up to a multiple of a.  a must be a power of 2.
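// For example, round(10, 8) == 16 and round(16, 8) == 16.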
   512  func round(n, a uintptr) uintptr {
   513  	return (n + a - 1) &^ (a - 1)
   514  }
   515  
   516  func newName(n, tag, pkgPath string, exported bool) name {
   517  	if len(n) > 1<<16-1 {
   518  		panic("reflect.nameFrom: name too long: " + n)
   519  	}
   520  	if len(tag) > 1<<16-1 {
   521  		panic("reflect.nameFrom: tag too long: " + tag)
   522  	}
   523  
   524  	var bits byte
   525  	l := 1 + 2 + len(n)
   526  	if exported {
   527  		bits |= 1 << 0
   528  	}
   529  	if len(tag) > 0 {
   530  		l += 2 + len(tag)
   531  		bits |= 1 << 1
   532  	}
   533  	if pkgPath != "" {
   534  		bits |= 1 << 2
   535  	}
   536  
   537  	b := make([]byte, l)
   538  	b[0] = bits
   539  	b[1] = uint8(len(n) >> 8)
   540  	b[2] = uint8(len(n))
   541  	copy(b[3:], n)
   542  	if len(tag) > 0 {
   543  		tb := b[3+len(n):]
   544  		tb[0] = uint8(len(tag) >> 8)
   545  		tb[1] = uint8(len(tag))
   546  		copy(tb[2:], tag)
   547  	}
   548  
   549  	if pkgPath != "" {
   550  		panic("reflect: creating a name with a package path is not supported")
   551  	}
   552  
   553  	return name{bytes: &b[0]}
   554  }
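
// As an illustrative sketch of the layout produced above,
// newName("Foo", "", "", true) yields the bytes
//
//	[]byte{0x01, 0x00, 0x03, 'F', 'o', 'o'}
//
// where 0x01 marks the name as exported and 0x00, 0x03 encode its length, 3.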
   555  
   556  /*
   557   * The compiler knows the exact layout of all the data structures above.
   558   * The compiler does not know about the data structures and methods below.
   559   */
   560  
   561  // Method represents a single method.
   562  type Method struct {
   563  	// Name is the method name.
   564  	// PkgPath is the package path that qualifies a lower case (unexported)
   565  	// method name. It is empty for upper case (exported) method names.
   566  	// The combination of PkgPath and Name uniquely identifies a method
   567  	// in a method set.
   568  	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
   569  	Name    string
   570  	PkgPath string
   571  
   572  	Type  Type  // method type
   573  	Func  Value // func with receiver as first argument
   574  	Index int   // index for Type.Method
   575  }
   576  
   577  const (
   578  	kindDirectIface = 1 << 5
   579  	kindGCProg      = 1 << 6 // Type.gc points to GC program
   580  	kindNoPointers  = 1 << 7
   581  	kindMask        = (1 << 5) - 1
   582  )
   583  
   584  func (k Kind) String() string {
   585  	if int(k) < len(kindNames) {
   586  		return kindNames[k]
   587  	}
   588  	return "kind" + strconv.Itoa(int(k))
   589  }
   590  
   591  var kindNames = []string{
   592  	Invalid:       "invalid",
   593  	Bool:          "bool",
   594  	Int:           "int",
   595  	Int8:          "int8",
   596  	Int16:         "int16",
   597  	Int32:         "int32",
   598  	Int64:         "int64",
   599  	Uint:          "uint",
   600  	Uint8:         "uint8",
   601  	Uint16:        "uint16",
   602  	Uint32:        "uint32",
   603  	Uint64:        "uint64",
   604  	Uintptr:       "uintptr",
   605  	Float32:       "float32",
   606  	Float64:       "float64",
   607  	Complex64:     "complex64",
   608  	Complex128:    "complex128",
   609  	Array:         "array",
   610  	Chan:          "chan",
   611  	Func:          "func",
   612  	Interface:     "interface",
   613  	Map:           "map",
   614  	Ptr:           "ptr",
   615  	Slice:         "slice",
   616  	String:        "string",
   617  	Struct:        "struct",
   618  	UnsafePointer: "unsafe.Pointer",
   619  }
   620  
   621  func (t *uncommonType) methods() []method {
   622  	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
   623  }
   624  
   625  // resolveNameOff resolves a name offset from a base pointer.
   626  // The (*rtype).nameOff method is a convenience wrapper for this function.
   627  // Implemented in the runtime package.
   628  func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
   629  
   630  // resolveTypeOff resolves an *rtype offset from a base type.
   631  // The (*rtype).typeOff method is a convenience wrapper for this function.
   632  // Implemented in the runtime package.
   633  func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
   634  
    635  // resolveTextOff resolves a function pointer offset from a base type.
   636  // The (*rtype).textOff method is a convenience wrapper for this function.
   637  // Implemented in the runtime package.
   638  func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
   639  
   640  // addReflectOff adds a pointer to the reflection lookup map in the runtime.
   641  // It returns a new ID that can be used as a typeOff or textOff, and will
   642  // be resolved correctly. Implemented in the runtime package.
   643  func addReflectOff(ptr unsafe.Pointer) int32
   644  
    645  // resolveReflectName adds a name to the reflection lookup map in the runtime.
   646  // It returns a new nameOff that can be used to refer to the pointer.
   647  func resolveReflectName(n name) nameOff {
   648  	return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
   649  }
   650  
   651  // resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
   652  // It returns a new typeOff that can be used to refer to the pointer.
   653  func resolveReflectType(t *rtype) typeOff {
   654  	return typeOff(addReflectOff(unsafe.Pointer(t)))
   655  }
   656  
   657  // resolveReflectText adds a function pointer to the reflection lookup map in
   658  // the runtime. It returns a new textOff that can be used to refer to the
   659  // pointer.
   660  func resolveReflectText(ptr unsafe.Pointer) textOff {
   661  	return textOff(addReflectOff(ptr))
   662  }
   663  
   664  type nameOff int32 // offset to a name
   665  type typeOff int32 // offset to an *rtype
   666  type textOff int32 // offset from top of text section
   667  
   668  func (t *rtype) nameOff(off nameOff) name {
   669  	return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
   670  }
   671  
   672  func (t *rtype) typeOff(off typeOff) *rtype {
   673  	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
   674  }
   675  
   676  func (t *rtype) textOff(off textOff) unsafe.Pointer {
   677  	return resolveTextOff(unsafe.Pointer(t), int32(off))
   678  }
   679  
   680  func (t *rtype) uncommon() *uncommonType {
   681  	if t.tflag&tflagUncommon == 0 {
   682  		return nil
   683  	}
   684  	switch t.Kind() {
   685  	case Struct:
   686  		return &(*structTypeUncommon)(unsafe.Pointer(t)).u
   687  	case Ptr:
   688  		type u struct {
   689  			ptrType
   690  			u uncommonType
   691  		}
   692  		return &(*u)(unsafe.Pointer(t)).u
   693  	case Func:
   694  		type u struct {
   695  			funcType
   696  			u uncommonType
   697  		}
   698  		return &(*u)(unsafe.Pointer(t)).u
   699  	case Slice:
   700  		type u struct {
   701  			sliceType
   702  			u uncommonType
   703  		}
   704  		return &(*u)(unsafe.Pointer(t)).u
   705  	case Array:
   706  		type u struct {
   707  			arrayType
   708  			u uncommonType
   709  		}
   710  		return &(*u)(unsafe.Pointer(t)).u
   711  	case Chan:
   712  		type u struct {
   713  			chanType
   714  			u uncommonType
   715  		}
   716  		return &(*u)(unsafe.Pointer(t)).u
   717  	case Map:
   718  		type u struct {
   719  			mapType
   720  			u uncommonType
   721  		}
   722  		return &(*u)(unsafe.Pointer(t)).u
   723  	case Interface:
   724  		type u struct {
   725  			interfaceType
   726  			u uncommonType
   727  		}
   728  		return &(*u)(unsafe.Pointer(t)).u
   729  	default:
   730  		type u struct {
   731  			rtype
   732  			u uncommonType
   733  		}
   734  		return &(*u)(unsafe.Pointer(t)).u
   735  	}
   736  }
   737  
   738  func (t *rtype) String() string {
   739  	s := t.nameOff(t.str).name()
   740  	if t.tflag&tflagExtraStar != 0 {
   741  		return s[1:]
   742  	}
   743  	return s
   744  }
   745  
   746  func (t *rtype) Size() uintptr { return t.size }
   747  
   748  func (t *rtype) Bits() int {
   749  	if t == nil {
   750  		panic("reflect: Bits of nil Type")
   751  	}
   752  	k := t.Kind()
   753  	if k < Int || k > Complex128 {
   754  		panic("reflect: Bits of non-arithmetic Type " + t.String())
   755  	}
   756  	return int(t.size) * 8
   757  }
   758  
   759  func (t *rtype) Align() int { return int(t.align) }
   760  
   761  func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
   762  
   763  func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
   764  
   765  func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
   766  
   767  func (t *rtype) common() *rtype { return t }
   768  
   769  var methodCache struct {
   770  	sync.RWMutex
   771  	m map[*rtype][]method
   772  }
   773  
   774  func (t *rtype) exportedMethods() []method {
   775  	methodCache.RLock()
   776  	methods, found := methodCache.m[t]
   777  	methodCache.RUnlock()
   778  
   779  	if found {
   780  		return methods
   781  	}
   782  
   783  	ut := t.uncommon()
   784  	if ut == nil {
   785  		return nil
   786  	}
   787  	allm := ut.methods()
   788  	allExported := true
   789  	for _, m := range allm {
   790  		name := t.nameOff(m.name)
   791  		if !name.isExported() {
   792  			allExported = false
   793  			break
   794  		}
   795  	}
   796  	if allExported {
   797  		methods = allm
   798  	} else {
   799  		methods = make([]method, 0, len(allm))
   800  		for _, m := range allm {
   801  			name := t.nameOff(m.name)
   802  			if name.isExported() {
   803  				methods = append(methods, m)
   804  			}
   805  		}
   806  		methods = methods[:len(methods):len(methods)]
   807  	}
   808  
   809  	methodCache.Lock()
   810  	if methodCache.m == nil {
   811  		methodCache.m = make(map[*rtype][]method)
   812  	}
   813  	methodCache.m[t] = methods
   814  	methodCache.Unlock()
   815  
   816  	return methods
   817  }
   818  
   819  func (t *rtype) NumMethod() int {
   820  	if t.Kind() == Interface {
   821  		tt := (*interfaceType)(unsafe.Pointer(t))
   822  		return tt.NumMethod()
   823  	}
   824  	if t.tflag&tflagUncommon == 0 {
   825  		return 0 // avoid methodCache lock in zero case
   826  	}
   827  	return len(t.exportedMethods())
   828  }
   829  
   830  func (t *rtype) Method(i int) (m Method) {
   831  	if t.Kind() == Interface {
   832  		tt := (*interfaceType)(unsafe.Pointer(t))
   833  		return tt.Method(i)
   834  	}
   835  	methods := t.exportedMethods()
   836  	if i < 0 || i >= len(methods) {
   837  		panic("reflect: Method index out of range")
   838  	}
   839  	p := methods[i]
   840  	pname := t.nameOff(p.name)
   841  	m.Name = pname.name()
   842  	fl := flag(Func)
   843  	mtyp := t.typeOff(p.mtyp)
   844  	ft := (*funcType)(unsafe.Pointer(mtyp))
   845  	in := make([]Type, 0, 1+len(ft.in()))
   846  	in = append(in, t)
   847  	for _, arg := range ft.in() {
   848  		in = append(in, arg)
   849  	}
   850  	out := make([]Type, 0, len(ft.out()))
   851  	for _, ret := range ft.out() {
   852  		out = append(out, ret)
   853  	}
   854  	mt := FuncOf(in, out, ft.IsVariadic())
   855  	m.Type = mt
   856  	tfn := t.textOff(p.tfn)
   857  	fn := unsafe.Pointer(&tfn)
   858  	m.Func = Value{mt.(*rtype), fn, fl}
   859  
   860  	m.Index = i
   861  	return m
   862  }
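
// For example (an illustrative sketch): if a struct type T has
//
//	func (t T) Add(x int) int
//
// as its only exported method, then reflect.TypeOf(T{}).Method(0).Type is the
// Type for func(T, int) int, with the receiver as the first argument.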
   863  
   864  func (t *rtype) MethodByName(name string) (m Method, ok bool) {
   865  	if t.Kind() == Interface {
   866  		tt := (*interfaceType)(unsafe.Pointer(t))
   867  		return tt.MethodByName(name)
   868  	}
   869  	ut := t.uncommon()
   870  	if ut == nil {
   871  		return Method{}, false
   872  	}
   873  	utmethods := ut.methods()
   874  	for i := 0; i < int(ut.mcount); i++ {
   875  		p := utmethods[i]
   876  		pname := t.nameOff(p.name)
   877  		if pname.isExported() && pname.name() == name {
   878  			return t.Method(i), true
   879  		}
   880  	}
   881  	return Method{}, false
   882  }
   883  
   884  func (t *rtype) PkgPath() string {
   885  	if t.tflag&tflagNamed == 0 {
   886  		return ""
   887  	}
   888  	ut := t.uncommon()
   889  	if ut == nil {
   890  		return ""
   891  	}
   892  	return t.nameOff(ut.pkgPath).name()
   893  }
   894  
   895  func hasPrefix(s, prefix string) bool {
   896  	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
   897  }
   898  
   899  func (t *rtype) Name() string {
   900  	if t.tflag&tflagNamed == 0 {
   901  		return ""
   902  	}
   903  	s := t.String()
   904  	i := len(s) - 1
   905  	for i >= 0 {
   906  		if s[i] == '.' {
   907  			break
   908  		}
   909  		i--
   910  	}
   911  	return s[i+1:]
   912  }
   913  
   914  func (t *rtype) ChanDir() ChanDir {
   915  	if t.Kind() != Chan {
   916  		panic("reflect: ChanDir of non-chan type")
   917  	}
   918  	tt := (*chanType)(unsafe.Pointer(t))
   919  	return ChanDir(tt.dir)
   920  }
   921  
   922  func (t *rtype) IsVariadic() bool {
   923  	if t.Kind() != Func {
   924  		panic("reflect: IsVariadic of non-func type")
   925  	}
   926  	tt := (*funcType)(unsafe.Pointer(t))
   927  	return tt.outCount&(1<<15) != 0
   928  }
   929  
   930  func (t *rtype) Elem() Type {
   931  	switch t.Kind() {
   932  	case Array:
   933  		tt := (*arrayType)(unsafe.Pointer(t))
   934  		return toType(tt.elem)
   935  	case Chan:
   936  		tt := (*chanType)(unsafe.Pointer(t))
   937  		return toType(tt.elem)
   938  	case Map:
   939  		tt := (*mapType)(unsafe.Pointer(t))
   940  		return toType(tt.elem)
   941  	case Ptr:
   942  		tt := (*ptrType)(unsafe.Pointer(t))
   943  		return toType(tt.elem)
   944  	case Slice:
   945  		tt := (*sliceType)(unsafe.Pointer(t))
   946  		return toType(tt.elem)
   947  	}
   948  	panic("reflect: Elem of invalid type")
   949  }
   950  
   951  func (t *rtype) Field(i int) StructField {
   952  	if t.Kind() != Struct {
   953  		panic("reflect: Field of non-struct type")
   954  	}
   955  	tt := (*structType)(unsafe.Pointer(t))
   956  	return tt.Field(i)
   957  }
   958  
   959  func (t *rtype) FieldByIndex(index []int) StructField {
   960  	if t.Kind() != Struct {
   961  		panic("reflect: FieldByIndex of non-struct type")
   962  	}
   963  	tt := (*structType)(unsafe.Pointer(t))
   964  	return tt.FieldByIndex(index)
   965  }
   966  
   967  func (t *rtype) FieldByName(name string) (StructField, bool) {
   968  	if t.Kind() != Struct {
   969  		panic("reflect: FieldByName of non-struct type")
   970  	}
   971  	tt := (*structType)(unsafe.Pointer(t))
   972  	return tt.FieldByName(name)
   973  }
   974  
   975  func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
   976  	if t.Kind() != Struct {
   977  		panic("reflect: FieldByNameFunc of non-struct type")
   978  	}
   979  	tt := (*structType)(unsafe.Pointer(t))
   980  	return tt.FieldByNameFunc(match)
   981  }
   982  
   983  func (t *rtype) In(i int) Type {
   984  	if t.Kind() != Func {
   985  		panic("reflect: In of non-func type")
   986  	}
   987  	tt := (*funcType)(unsafe.Pointer(t))
   988  	return toType(tt.in()[i])
   989  }
   990  
   991  func (t *rtype) Key() Type {
   992  	if t.Kind() != Map {
   993  		panic("reflect: Key of non-map type")
   994  	}
   995  	tt := (*mapType)(unsafe.Pointer(t))
   996  	return toType(tt.key)
   997  }
   998  
   999  func (t *rtype) Len() int {
  1000  	if t.Kind() != Array {
  1001  		panic("reflect: Len of non-array type")
  1002  	}
  1003  	tt := (*arrayType)(unsafe.Pointer(t))
  1004  	return int(tt.len)
  1005  }
  1006  
  1007  func (t *rtype) NumField() int {
  1008  	if t.Kind() != Struct {
  1009  		panic("reflect: NumField of non-struct type")
  1010  	}
  1011  	tt := (*structType)(unsafe.Pointer(t))
  1012  	return len(tt.fields)
  1013  }
  1014  
  1015  func (t *rtype) NumIn() int {
  1016  	if t.Kind() != Func {
  1017  		panic("reflect: NumIn of non-func type")
  1018  	}
  1019  	tt := (*funcType)(unsafe.Pointer(t))
  1020  	return int(tt.inCount)
  1021  }
  1022  
  1023  func (t *rtype) NumOut() int {
  1024  	if t.Kind() != Func {
  1025  		panic("reflect: NumOut of non-func type")
  1026  	}
  1027  	tt := (*funcType)(unsafe.Pointer(t))
  1028  	return len(tt.out())
  1029  }
  1030  
  1031  func (t *rtype) Out(i int) Type {
  1032  	if t.Kind() != Func {
  1033  		panic("reflect: Out of non-func type")
  1034  	}
  1035  	tt := (*funcType)(unsafe.Pointer(t))
  1036  	return toType(tt.out()[i])
  1037  }
  1038  
  1039  func (t *funcType) in() []*rtype {
  1040  	uadd := unsafe.Sizeof(*t)
  1041  	if t.tflag&tflagUncommon != 0 {
  1042  		uadd += unsafe.Sizeof(uncommonType{})
  1043  	}
  1044  	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount]
  1045  }
  1046  
  1047  func (t *funcType) out() []*rtype {
  1048  	uadd := unsafe.Sizeof(*t)
  1049  	if t.tflag&tflagUncommon != 0 {
  1050  		uadd += unsafe.Sizeof(uncommonType{})
  1051  	}
  1052  	outCount := t.outCount & (1<<15 - 1)
  1053  	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
  1054  }
  1055  
  1056  func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
  1057  	return unsafe.Pointer(uintptr(p) + x)
  1058  }
  1059  
  1060  func (d ChanDir) String() string {
  1061  	switch d {
  1062  	case SendDir:
  1063  		return "chan<-"
  1064  	case RecvDir:
  1065  		return "<-chan"
  1066  	case BothDir:
  1067  		return "chan"
  1068  	}
  1069  	return "ChanDir" + strconv.Itoa(int(d))
  1070  }
  1071  
  1072  // Method returns the i'th method in the type's method set.
  1073  func (t *interfaceType) Method(i int) (m Method) {
  1074  	if i < 0 || i >= len(t.methods) {
  1075  		return
  1076  	}
  1077  	p := &t.methods[i]
  1078  	pname := t.nameOff(p.name)
  1079  	m.Name = pname.name()
  1080  	if !pname.isExported() {
  1081  		m.PkgPath = pname.pkgPath()
  1082  		if m.PkgPath == "" {
  1083  			m.PkgPath = t.pkgPath.name()
  1084  		}
  1085  	}
  1086  	m.Type = toType(t.typeOff(p.typ))
  1087  	m.Index = i
  1088  	return
  1089  }
  1090  
  1091  // NumMethod returns the number of interface methods in the type's method set.
  1092  func (t *interfaceType) NumMethod() int { return len(t.methods) }
  1093  
   1094  // MethodByName returns the method with the given name in the type's method set and reports whether it was found.
  1095  func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
  1096  	if t == nil {
  1097  		return
  1098  	}
  1099  	var p *imethod
  1100  	for i := range t.methods {
  1101  		p = &t.methods[i]
  1102  		if t.nameOff(p.name).name() == name {
  1103  			return t.Method(i), true
  1104  		}
  1105  	}
  1106  	return
  1107  }
  1108  
  1109  // A StructField describes a single field in a struct.
  1110  type StructField struct {
  1111  	// Name is the field name.
  1112  	Name string
  1113  	// PkgPath is the package path that qualifies a lower case (unexported)
  1114  	// field name. It is empty for upper case (exported) field names.
  1115  	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
  1116  	PkgPath string
  1117  
  1118  	Type      Type      // field type
  1119  	Tag       StructTag // field tag string
  1120  	Offset    uintptr   // offset within struct, in bytes
  1121  	Index     []int     // index sequence for Type.FieldByIndex
  1122  	Anonymous bool      // is an embedded field
  1123  }
  1124  
  1125  // A StructTag is the tag string in a struct field.
  1126  //
  1127  // By convention, tag strings are a concatenation of
  1128  // optionally space-separated key:"value" pairs.
  1129  // Each key is a non-empty string consisting of non-control
  1130  // characters other than space (U+0020 ' '), quote (U+0022 '"'),
  1131  // and colon (U+003A ':').  Each value is quoted using U+0022 '"'
  1132  // characters and Go string literal syntax.
  1133  type StructTag string
  1134  
  1135  // Get returns the value associated with key in the tag string.
  1136  // If there is no such key in the tag, Get returns the empty string.
  1137  // If the tag does not have the conventional format, the value
  1138  // returned by Get is unspecified. To determine whether a tag is
  1139  // explicitly set to the empty string, use Lookup.
  1140  func (tag StructTag) Get(key string) string {
  1141  	v, _ := tag.Lookup(key)
  1142  	return v
  1143  }
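
// For example (an illustrative sketch), given
//
//	type S struct {
//		F string `species:"gopher" color:"blue"`
//	}
//
// the tag of field F can be read as:
//
//	f, _ := reflect.TypeOf(S{}).FieldByName("F")
//	f.Tag.Get("color")   // "blue"
//	f.Tag.Get("species") // "gopher"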
  1144  
  1145  // Lookup returns the value associated with key in the tag string.
  1146  // If the key is present in the tag the value (which may be empty)
  1147  // is returned. Otherwise the returned value will be the empty string.
  1148  // The ok return value reports whether the value was explicitly set in
  1149  // the tag string. If the tag does not have the conventional format,
  1150  // the value returned by Lookup is unspecified.
  1151  func (tag StructTag) Lookup(key string) (value string, ok bool) {
  1152  	// When modifying this code, also update the validateStructTag code
  1153  	// in golang.org/x/tools/cmd/vet/structtag.go.
  1154  
  1155  	for tag != "" {
  1156  		// Skip leading space.
  1157  		i := 0
  1158  		for i < len(tag) && tag[i] == ' ' {
  1159  			i++
  1160  		}
  1161  		tag = tag[i:]
  1162  		if tag == "" {
  1163  			break
  1164  		}
  1165  
  1166  		// Scan to colon. A space, a quote or a control character is a syntax error.
  1167  		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
  1168  		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
  1169  		// as it is simpler to inspect the tag's bytes than the tag's runes.
  1170  		i = 0
  1171  		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
  1172  			i++
  1173  		}
  1174  		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
  1175  			break
  1176  		}
  1177  		name := string(tag[:i])
  1178  		tag = tag[i+1:]
  1179  
  1180  		// Scan quoted string to find value.
  1181  		i = 1
  1182  		for i < len(tag) && tag[i] != '"' {
  1183  			if tag[i] == '\\' {
  1184  				i++
  1185  			}
  1186  			i++
  1187  		}
  1188  		if i >= len(tag) {
  1189  			break
  1190  		}
  1191  		qvalue := string(tag[:i+1])
  1192  		tag = tag[i+1:]
  1193  
  1194  		if key == name {
  1195  			value, err := strconv.Unquote(qvalue)
  1196  			if err != nil {
  1197  				break
  1198  			}
  1199  			return value, true
  1200  		}
  1201  	}
  1202  	return "", false
  1203  }
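
// For example (an illustrative sketch):
//
//	tag := reflect.StructTag(`json:"" xml:"name"`)
//	v, ok := tag.Lookup("json") // v == "", ok == true
//	v, ok = tag.Lookup("yaml")  // v == "", ok == false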
  1204  
  1205  // Field returns the i'th struct field.
  1206  func (t *structType) Field(i int) (f StructField) {
  1207  	if i < 0 || i >= len(t.fields) {
  1208  		panic("reflect: Field index out of bounds")
  1209  	}
  1210  	p := &t.fields[i]
  1211  	f.Type = toType(p.typ)
  1212  	if name := p.name.name(); name != "" {
  1213  		f.Name = name
  1214  	} else {
  1215  		t := f.Type
  1216  		if t.Kind() == Ptr {
  1217  			t = t.Elem()
  1218  		}
  1219  		f.Name = t.Name()
  1220  		f.Anonymous = true
  1221  	}
  1222  	if !p.name.isExported() {
  1223  		// Fields never have an import path in their name.
  1224  		f.PkgPath = t.pkgPath.name()
  1225  	}
  1226  	if tag := p.name.tag(); tag != "" {
  1227  		f.Tag = StructTag(tag)
  1228  	}
  1229  	f.Offset = p.offset
  1230  
  1231  	// NOTE(rsc): This is the only allocation in the interface
  1232  	// presented by a reflect.Type. It would be nice to avoid,
  1233  	// at least in the common cases, but we need to make sure
  1234  	// that misbehaving clients of reflect cannot affect other
  1235  	// uses of reflect. One possibility is CL 5371098, but we
  1236  	// postponed that ugliness until there is a demonstrated
  1237  	// need for the performance. This is issue 2320.
  1238  	f.Index = []int{i}
  1239  	return
  1240  }
  1241  
  1242  // TODO(gri): Should there be an error/bool indicator if the index
  1243  //            is wrong for FieldByIndex?
  1244  
  1245  // FieldByIndex returns the nested field corresponding to index.
  1246  func (t *structType) FieldByIndex(index []int) (f StructField) {
  1247  	f.Type = toType(&t.rtype)
  1248  	for i, x := range index {
  1249  		if i > 0 {
  1250  			ft := f.Type
  1251  			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
  1252  				ft = ft.Elem()
  1253  			}
  1254  			f.Type = ft
  1255  		}
  1256  		f = f.Type.Field(x)
  1257  	}
  1258  	return
  1259  }
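
// For example (an illustrative sketch), given
//
//	type Inner struct{ X int }
//	type Outer struct{ Inner }
//
// reflect.TypeOf(Outer{}).FieldByIndex([]int{0, 0}) describes the promoted
// field X, reached through the embedded Inner field.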
  1260  
  1261  // A fieldScan represents an item on the fieldByNameFunc scan work list.
  1262  type fieldScan struct {
  1263  	typ   *structType
  1264  	index []int
  1265  }
  1266  
  1267  // FieldByNameFunc returns the struct field with a name that satisfies the
  1268  // match function and a boolean to indicate if the field was found.
  1269  func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
  1270  	// This uses the same condition that the Go language does: there must be a unique instance
  1271  	// of the match at a given depth level. If there are multiple instances of a match at the
  1272  	// same depth, they annihilate each other and inhibit any possible match at a lower level.
  1273  	// The algorithm is breadth first search, one depth level at a time.
  1274  
  1275  	// The current and next slices are work queues:
  1276  	// current lists the fields to visit on this depth level,
  1277  	// and next lists the fields on the next lower level.
  1278  	current := []fieldScan{}
  1279  	next := []fieldScan{{typ: t}}
  1280  
  1281  	// nextCount records the number of times an embedded type has been
  1282  	// encountered and considered for queueing in the 'next' slice.
  1283  	// We only queue the first one, but we increment the count on each.
  1284  	// If a struct type T can be reached more than once at a given depth level,
  1285  	// then it annihilates itself and need not be considered at all when we
  1286  	// process that next depth level.
  1287  	var nextCount map[*structType]int
  1288  
  1289  	// visited records the structs that have been considered already.
  1290  	// Embedded pointer fields can create cycles in the graph of
  1291  	// reachable embedded types; visited avoids following those cycles.
  1292  	// It also avoids duplicated effort: if we didn't find the field in an
  1293  	// embedded type T at level 2, we won't find it in one at level 4 either.
  1294  	visited := map[*structType]bool{}
  1295  
  1296  	for len(next) > 0 {
  1297  		current, next = next, current[:0]
  1298  		count := nextCount
  1299  		nextCount = nil
  1300  
  1301  		// Process all the fields at this depth, now listed in 'current'.
  1302  		// The loop queues embedded fields found in 'next', for processing during the next
  1303  		// iteration. The multiplicity of the 'current' field counts is recorded
  1304  		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
  1305  		for _, scan := range current {
  1306  			t := scan.typ
  1307  			if visited[t] {
  1308  				// We've looked through this type before, at a higher level.
  1309  				// That higher level would shadow the lower level we're now at,
  1310  				// so this one can't be useful to us. Ignore it.
  1311  				continue
  1312  			}
  1313  			visited[t] = true
  1314  			for i := range t.fields {
  1315  				f := &t.fields[i]
  1316  				// Find name and type for field f.
  1317  				var fname string
  1318  				var ntyp *rtype
  1319  				if name := f.name.name(); name != "" {
  1320  					fname = name
  1321  				} else {
  1322  					// Anonymous field of type T or *T.
  1323  					// Name taken from type.
  1324  					ntyp = f.typ
  1325  					if ntyp.Kind() == Ptr {
  1326  						ntyp = ntyp.Elem().common()
  1327  					}
  1328  					fname = ntyp.Name()
  1329  				}
  1330  
  1331  				// Does it match?
  1332  				if match(fname) {
  1333  					// Potential match
  1334  					if count[t] > 1 || ok {
  1335  						// Name appeared multiple times at this level: annihilate.
  1336  						return StructField{}, false
  1337  					}
  1338  					result = t.Field(i)
  1339  					result.Index = nil
  1340  					result.Index = append(result.Index, scan.index...)
  1341  					result.Index = append(result.Index, i)
  1342  					ok = true
  1343  					continue
  1344  				}
  1345  
  1346  				// Queue embedded struct fields for processing with next level,
  1347  				// but only if we haven't seen a match yet at this level and only
  1348  				// if the embedded types haven't already been queued.
  1349  				if ok || ntyp == nil || ntyp.Kind() != Struct {
  1350  					continue
  1351  				}
  1352  				styp := (*structType)(unsafe.Pointer(ntyp))
  1353  				if nextCount[styp] > 0 {
  1354  					nextCount[styp] = 2 // exact multiple doesn't matter
  1355  					continue
  1356  				}
  1357  				if nextCount == nil {
  1358  					nextCount = map[*structType]int{}
  1359  				}
  1360  				nextCount[styp] = 1
  1361  				if count[t] > 1 {
  1362  					nextCount[styp] = 2 // exact multiple doesn't matter
  1363  				}
  1364  				var index []int
  1365  				index = append(index, scan.index...)
  1366  				index = append(index, i)
  1367  				next = append(next, fieldScan{styp, index})
  1368  			}
  1369  		}
  1370  		if ok {
  1371  			break
  1372  		}
  1373  	}
  1374  	return
  1375  }
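
// For example (an illustrative sketch), given
//
//	type A struct{ X int }
//	type B struct{ X int }
//	type S struct {
//		A
//		B
//	}
//
// reflect.TypeOf(S{}).FieldByName("X") reports ok == false: the two promoted
// fields named X appear at the same depth and annihilate each other.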
  1376  
  1377  // FieldByName returns the struct field with the given name
  1378  // and a boolean to indicate if the field was found.
  1379  func (t *structType) FieldByName(name string) (f StructField, present bool) {
  1380  	// Quick check for top-level name, or struct without anonymous fields.
  1381  	hasAnon := false
  1382  	if name != "" {
  1383  		for i := range t.fields {
  1384  			tf := &t.fields[i]
  1385  			tfname := tf.name.name()
  1386  			if tfname == "" {
  1387  				hasAnon = true
  1388  				continue
  1389  			}
  1390  			if tfname == name {
  1391  				return t.Field(i), true
  1392  			}
  1393  		}
  1394  	}
  1395  	if !hasAnon {
  1396  		return
  1397  	}
  1398  	return t.FieldByNameFunc(func(s string) bool { return s == name })
  1399  }
  1400  
  1401  // TypeOf returns the reflection Type that represents the dynamic type of i.
  1402  // If i is a nil interface value, TypeOf returns nil.
  1403  func TypeOf(i interface{}) Type {
  1404  	eface := *(*emptyInterface)(unsafe.Pointer(&i))
  1405  	return toType(eface.typ)
  1406  }
  1407  
  1408  // ptrMap is the cache for PtrTo.
  1409  var ptrMap struct {
  1410  	sync.RWMutex
  1411  	m map[*rtype]*ptrType
  1412  }
  1413  
  1414  // PtrTo returns the pointer type with element t.
  1415  // For example, if t represents type Foo, PtrTo(t) represents *Foo.
  1416  func PtrTo(t Type) Type {
  1417  	return t.(*rtype).ptrTo()
  1418  }
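
// For example (an illustrative sketch):
//
//	reflect.PtrTo(reflect.TypeOf(0)) == reflect.TypeOf((*int)(nil)) // true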
  1419  
  1420  func (t *rtype) ptrTo() *rtype {
  1421  	if t.ptrToThis != 0 {
  1422  		return t.typeOff(t.ptrToThis)
  1423  	}
  1424  
  1425  	// Check the cache.
  1426  	ptrMap.RLock()
  1427  	if m := ptrMap.m; m != nil {
  1428  		if p := m[t]; p != nil {
  1429  			ptrMap.RUnlock()
  1430  			return &p.rtype
  1431  		}
  1432  	}
  1433  	ptrMap.RUnlock()
  1434  
  1435  	ptrMap.Lock()
  1436  	if ptrMap.m == nil {
  1437  		ptrMap.m = make(map[*rtype]*ptrType)
  1438  	}
  1439  	p := ptrMap.m[t]
  1440  	if p != nil {
  1441  		// some other goroutine won the race and created it
  1442  		ptrMap.Unlock()
  1443  		return &p.rtype
  1444  	}
  1445  
  1446  	// Look in known types.
  1447  	s := "*" + t.String()
  1448  	for _, tt := range typesByString(s) {
  1449  		p = (*ptrType)(unsafe.Pointer(tt))
  1450  		if p.elem == t {
  1451  			ptrMap.m[t] = p
  1452  			ptrMap.Unlock()
  1453  			return &p.rtype
  1454  		}
  1455  	}
  1456  
  1457  	// Create a new ptrType starting with the description
  1458  	// of an *unsafe.Pointer.
  1459  	p = new(ptrType)
  1460  	var iptr interface{} = (*unsafe.Pointer)(nil)
  1461  	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
  1462  	*p = *prototype
  1463  
  1464  	p.str = resolveReflectName(newName(s, "", "", false))
  1465  
  1466  	// For the type structures linked into the binary, the
  1467  	// compiler provides a good hash of the string.
  1468  	// Create a good hash for the new string by using
  1469  	// the FNV-1 hash's mixing function to combine the
  1470  	// old hash and the new "*".
  1471  	p.hash = fnv1(t.hash, '*')
  1472  
  1473  	p.elem = t
  1474  
  1475  	ptrMap.m[t] = p
  1476  	ptrMap.Unlock()
  1477  	return &p.rtype
  1478  }
  1479  
  1480  // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
  1481  func fnv1(x uint32, list ...byte) uint32 {
  1482  	for _, b := range list {
  1483  		x = x*16777619 ^ uint32(b)
  1484  	}
  1485  	return x
  1486  }
  1487  
  1488  func (t *rtype) Implements(u Type) bool {
  1489  	if u == nil {
  1490  		panic("reflect: nil type passed to Type.Implements")
  1491  	}
  1492  	if u.Kind() != Interface {
  1493  		panic("reflect: non-interface type passed to Type.Implements")
  1494  	}
  1495  	return implements(u.(*rtype), t)
  1496  }
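
// For example (an illustrative sketch), the Type of an interface such as
// io.Writer is usually obtained through a pointer conversion, and Implements
// can then be asked about it:
//
//	writerType := reflect.TypeOf((*io.Writer)(nil)).Elem()
//	reflect.TypeOf(&bytes.Buffer{}).Implements(writerType) // true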
  1497  
  1498  func (t *rtype) AssignableTo(u Type) bool {
  1499  	if u == nil {
  1500  		panic("reflect: nil type passed to Type.AssignableTo")
  1501  	}
  1502  	uu := u.(*rtype)
  1503  	return directlyAssignable(uu, t) || implements(uu, t)
  1504  }
  1505  
  1506  func (t *rtype) ConvertibleTo(u Type) bool {
  1507  	if u == nil {
  1508  		panic("reflect: nil type passed to Type.ConvertibleTo")
  1509  	}
  1510  	uu := u.(*rtype)
  1511  	return convertOp(uu, t) != nil
  1512  }
  1513  
  1514  func (t *rtype) Comparable() bool {
  1515  	return t.alg != nil && t.alg.equal != nil
  1516  }
  1517  
  1518  // implements reports whether the type V implements the interface type T.
  1519  func implements(T, V *rtype) bool {
  1520  	if T.Kind() != Interface {
  1521  		return false
  1522  	}
  1523  	t := (*interfaceType)(unsafe.Pointer(T))
  1524  	if len(t.methods) == 0 {
  1525  		return true
  1526  	}
  1527  
  1528  	// The same algorithm applies in both cases, but the
  1529  	// method tables for an interface type and a concrete type
  1530  	// are different, so the code is duplicated.
  1531  	// In both cases the algorithm is a linear scan over the two
  1532  	// lists - T's methods and V's methods - simultaneously.
  1533  	// Since method tables are stored in a unique sorted order
  1534  	// (alphabetical, with no duplicate method names), the scan
  1535  	// through V's methods must hit a match for each of T's
  1536  	// methods along the way, or else V does not implement T.
  1537  	// This lets us run the scan in overall linear time instead of
   1538  // the quadratic time a naive search would require.
  1539  	// See also ../runtime/iface.go.
  1540  	if V.Kind() == Interface {
  1541  		v := (*interfaceType)(unsafe.Pointer(V))
  1542  		i := 0
  1543  		for j := 0; j < len(v.methods); j++ {
  1544  			tm := &t.methods[i]
  1545  			vm := &v.methods[j]
  1546  			if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
  1547  				if i++; i >= len(t.methods) {
  1548  					return true
  1549  				}
  1550  			}
  1551  		}
  1552  		return false
  1553  	}
  1554  
  1555  	v := V.uncommon()
  1556  	if v == nil {
  1557  		return false
  1558  	}
  1559  	i := 0
  1560  	vmethods := v.methods()
  1561  	for j := 0; j < int(v.mcount); j++ {
  1562  		tm := &t.methods[i]
  1563  		vm := vmethods[j]
  1564  		if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
  1565  			if i++; i >= len(t.methods) {
  1566  				return true
  1567  			}
  1568  		}
  1569  	}
  1570  	return false
  1571  }
  1572  
  1573  // directlyAssignable reports whether a value x of type V can be directly
  1574  // assigned (using memmove) to a value of type T.
  1575  // https://golang.org/doc/go_spec.html#Assignability
  1576  // Ignoring the interface rules (implemented elsewhere)
  1577  // and the ideal constant rules (no ideal constants at run time).
  1578  func directlyAssignable(T, V *rtype) bool {
  1579  	// x's type V is identical to T?
  1580  	if T == V {
  1581  		return true
  1582  	}
  1583  
  1584  	// Otherwise at least one of T and V must be unnamed
  1585  	// and they must have the same kind.
  1586  	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
  1587  		return false
  1588  	}
  1589  
   1590  // x's type V and T must have identical underlying types.
  1591  	return haveIdenticalUnderlyingType(T, V)
  1592  }
  1593  
  1594  func haveIdenticalUnderlyingType(T, V *rtype) bool {
  1595  	if T == V {
  1596  		return true
  1597  	}
  1598  
  1599  	kind := T.Kind()
  1600  	if kind != V.Kind() {
  1601  		return false
  1602  	}
  1603  
  1604  	// Non-composite types of equal kind have same underlying type
  1605  	// (the predefined instance of the type).
  1606  	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
  1607  		return true
  1608  	}
  1609  
  1610  	// Composite types.
  1611  	switch kind {
  1612  	case Array:
  1613  		return T.Elem() == V.Elem() && T.Len() == V.Len()
  1614  
  1615  	case Chan:
  1616  		// Special case:
  1617  		// x is a bidirectional channel value, T is a channel type,
  1618  		// and x's type V and T have identical element types.
  1619  		if V.ChanDir() == BothDir && T.Elem() == V.Elem() {
  1620  			return true
  1621  		}
  1622  
  1623  		// Otherwise continue test for identical underlying type.
  1624  		return V.ChanDir() == T.ChanDir() && T.Elem() == V.Elem()
  1625  
  1626  	case Func:
  1627  		t := (*funcType)(unsafe.Pointer(T))
  1628  		v := (*funcType)(unsafe.Pointer(V))
  1629  		if t.outCount != v.outCount || t.inCount != v.inCount {
  1630  			return false
  1631  		}
  1632  		for i := 0; i < t.NumIn(); i++ {
  1633  			if t.In(i) != v.In(i) {
  1634  				return false
  1635  			}
  1636  		}
  1637  		for i := 0; i < t.NumOut(); i++ {
  1638  			if t.Out(i) != v.Out(i) {
  1639  				return false
  1640  			}
  1641  		}
  1642  		return true
  1643  
  1644  	case Interface:
  1645  		t := (*interfaceType)(unsafe.Pointer(T))
  1646  		v := (*interfaceType)(unsafe.Pointer(V))
  1647  		if len(t.methods) == 0 && len(v.methods) == 0 {
  1648  			return true
  1649  		}
  1650  		// Might have the same methods but still
  1651  		// need a run time conversion.
  1652  		return false
  1653  
  1654  	case Map:
  1655  		return T.Key() == V.Key() && T.Elem() == V.Elem()
  1656  
  1657  	case Ptr, Slice:
  1658  		return T.Elem() == V.Elem()
  1659  
  1660  	case Struct:
  1661  		t := (*structType)(unsafe.Pointer(T))
  1662  		v := (*structType)(unsafe.Pointer(V))
  1663  		if len(t.fields) != len(v.fields) {
  1664  			return false
  1665  		}
  1666  		for i := range t.fields {
  1667  			tf := &t.fields[i]
  1668  			vf := &v.fields[i]
  1669  			if tf.name.name() != vf.name.name() {
  1670  				return false
  1671  			}
  1672  			if tf.typ != vf.typ {
  1673  				return false
  1674  			}
  1675  			if tf.name.tag() != vf.name.tag() {
  1676  				return false
  1677  			}
  1678  			if tf.offset != vf.offset {
  1679  				return false
  1680  			}
  1681  		}
  1682  		return true
  1683  	}
  1684  
  1685  	return false
  1686  }
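
        // For example, the named struct types
        //
        //	type A struct{ X int }
        //	type B struct{ X int }
        //
        // have identical underlying types: their fields match in name, type, tag,
        // and offset. Neither is directly assignable to the other (both are named),
        // but TypeOf(A{}).ConvertibleTo(TypeOf(B{})) reports true via convertOp.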
  1687  
  1688  // typelinks is implemented in package runtime.
  1689  // It returns a slice of the sections in each module,
  1690  // and a slice of *rtype offsets in each module.
  1691  //
  1692  // The types in each module are sorted by string. That is, the first
  1693  // two linked types of the first module are:
  1694  //
  1695  //	d0 := sections[0]
  1696  //	t1 := (*rtype)(add(d0, offset[0][0]))
  1697  //	t2 := (*rtype)(add(d0, offset[0][1]))
  1698  //
  1699  // and
  1700  //
  1701  //	t1.String() < t2.String()
  1702  //
  1703  // Note that strings are not unique identifiers for types:
  1704  // there can be more than one with a given string.
  1705  // Only types we might want to look up are included:
  1706  // pointers, channels, maps, slices, and arrays.
  1707  func typelinks() (sections []unsafe.Pointer, offset [][]int32)
  1708  
  1709  func rtypeOff(section unsafe.Pointer, off int32) *rtype {
  1710  	return (*rtype)(add(section, uintptr(off)))
  1711  }
  1712  
  1713  // typesByString returns the subslice of typelinks() whose elements have
  1714  // the given string representation.
  1715  // It may be empty (no known types with that string) or may have
  1716  // multiple elements (multiple types with that string).
  1717  func typesByString(s string) []*rtype {
  1718  	sections, offset := typelinks()
  1719  	var ret []*rtype
  1720  
  1721  	for offsI, offs := range offset {
  1722  		section := sections[offsI]
  1723  
  1724  		// We are looking for the first index i where the string becomes >= s.
  1725  		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
  1726  		i, j := 0, len(offs)
  1727  		for i < j {
  1728  			h := i + (j-i)/2 // avoid overflow when computing h
  1729  			// i ≤ h < j
  1730  			if !(rtypeOff(section, offs[h]).String() >= s) {
  1731  				i = h + 1 // preserves f(i-1) == false
  1732  			} else {
  1733  				j = h // preserves f(j) == true
  1734  			}
  1735  		}
  1736  		// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.
  1737  
  1738  		// Having found the first, linear scan forward to find the last.
  1739  		// We could do a second binary search, but the caller is going
  1740  		// to do a linear scan anyway.
  1741  		for j := i; j < len(offs); j++ {
  1742  			typ := rtypeOff(section, offs[j])
  1743  			if typ.String() != s {
  1744  				break
  1745  			}
  1746  			ret = append(ret, typ)
  1747  		}
  1748  	}
  1749  	return ret
  1750  }
  1751  
  1752  // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
  1753  var lookupCache struct {
  1754  	sync.RWMutex
  1755  	m map[cacheKey]*rtype
  1756  }
  1757  
  1758  // A cacheKey is the key for use in the lookupCache.
  1759  // Four values describe any of the types we are looking for:
  1760  // type kind, one or two subtypes, and an extra integer.
  1761  type cacheKey struct {
  1762  	kind  Kind
  1763  	t1    *rtype
  1764  	t2    *rtype
  1765  	extra uintptr
  1766  }
  1767  
  1768  // cacheGet looks for a type under the key k in the lookupCache.
  1769  // If it finds one, it returns that type.
  1770  // If not, it returns nil with the cache locked.
  1771  // The caller is expected to use cachePut to unlock the cache.
  1772  func cacheGet(k cacheKey) Type {
  1773  	lookupCache.RLock()
  1774  	t := lookupCache.m[k]
  1775  	lookupCache.RUnlock()
  1776  	if t != nil {
  1777  		return t
  1778  	}
  1779  
  1780  	lookupCache.Lock()
  1781  	t = lookupCache.m[k]
  1782  	if t != nil {
  1783  		lookupCache.Unlock()
  1784  		return t
  1785  	}
  1786  
  1787  	if lookupCache.m == nil {
  1788  		lookupCache.m = make(map[cacheKey]*rtype)
  1789  	}
  1790  
  1791  	return nil
  1792  }
  1793  
  1794  // cachePut stores the given type in the cache, unlocks the cache,
  1795  // and returns the type. It is expected that the cache is locked
  1796  // because cacheGet returned nil.
  1797  func cachePut(k cacheKey, t *rtype) Type {
  1798  	lookupCache.m[k] = t
  1799  	lookupCache.Unlock()
  1800  	return t
  1801  }
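
        // Together, cacheGet and cachePut implement a check, lock, and re-check
        // protocol. A typical caller (see SliceOf below) looks like:
        //
        //	ckey := cacheKey{Slice, typ, nil, 0}
        //	if t := cacheGet(ckey); t != nil {
        //		return t // cache hit; the cache is not left locked
        //	}
        //	// cacheGet returned nil: the cache is now locked.
        //	... find or construct the *rtype ...
        //	return cachePut(ckey, rt) // stores rt, unlocks, returns rt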
  1802  
  1803  // The funcLookupCache caches FuncOf lookups.
  1804  // FuncOf does not share the common lookupCache since cacheKey is not
  1805  // sufficient to represent functions unambiguously.
  1806  var funcLookupCache struct {
  1807  	sync.RWMutex
  1808  	m map[uint32][]*rtype // keyed by hash calculated in FuncOf
  1809  }
  1810  
  1811  // ChanOf returns the channel type with the given direction and element type.
  1812  // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
  1813  //
  1814  // The gc runtime imposes a limit of 64 kB on channel element types.
  1815  // If t's size is equal to or exceeds this limit, ChanOf panics.
  1816  func ChanOf(dir ChanDir, t Type) Type {
  1817  	typ := t.(*rtype)
  1818  
  1819  	// Look in cache.
  1820  	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
  1821  	if ch := cacheGet(ckey); ch != nil {
  1822  		return ch
  1823  	}
  1824  
  1825  	// This restriction is imposed by the gc compiler and the runtime.
  1826  	if typ.size >= 1<<16 {
  1827  		lookupCache.Unlock()
  1828  		panic("reflect.ChanOf: element size too large")
  1829  	}
  1830  
  1831  	// Look in known types.
  1832  	// TODO: Precedence when constructing string.
  1833  	var s string
  1834  	switch dir {
  1835  	default:
  1836  		lookupCache.Unlock()
  1837  		panic("reflect.ChanOf: invalid dir")
  1838  	case SendDir:
  1839  		s = "chan<- " + typ.String()
  1840  	case RecvDir:
  1841  		s = "<-chan " + typ.String()
  1842  	case BothDir:
  1843  		s = "chan " + typ.String()
  1844  	}
  1845  	for _, tt := range typesByString(s) {
  1846  		ch := (*chanType)(unsafe.Pointer(tt))
  1847  		if ch.elem == typ && ch.dir == uintptr(dir) {
  1848  			return cachePut(ckey, tt)
  1849  		}
  1850  	}
  1851  
  1852  	// Make a channel type.
  1853  	var ichan interface{} = (chan unsafe.Pointer)(nil)
  1854  	prototype := *(**chanType)(unsafe.Pointer(&ichan))
  1855  	ch := new(chanType)
  1856  	*ch = *prototype
  1857  	ch.tflag = 0
  1858  	ch.dir = uintptr(dir)
  1859  	ch.str = resolveReflectName(newName(s, "", "", false))
  1860  	ch.hash = fnv1(typ.hash, 'c', byte(dir))
  1861  	ch.elem = typ
  1862  
  1863  	return cachePut(ckey, &ch.rtype)
  1864  }
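
        // For example, a send-only channel of strings can be constructed at run time:
        //
        //	t := ChanOf(SendDir, TypeOf(""))
        //	// t.String() == "chan<- string"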
  1865  
  1866  func ismapkey(*rtype) bool // implemented in runtime
  1867  
  1868  // MapOf returns the map type with the given key and element types.
  1869  // For example, if k represents int and e represents string,
  1870  // MapOf(k, e) represents map[int]string.
  1871  //
  1872  // If the key type is not a valid map key type (that is, if it does
  1873  // not implement Go's == operator), MapOf panics.
  1874  func MapOf(key, elem Type) Type {
  1875  	ktyp := key.(*rtype)
  1876  	etyp := elem.(*rtype)
  1877  
  1878  	if !ismapkey(ktyp) {
  1879  		panic("reflect.MapOf: invalid key type " + ktyp.String())
  1880  	}
  1881  
  1882  	// Look in cache.
  1883  	ckey := cacheKey{Map, ktyp, etyp, 0}
  1884  	if mt := cacheGet(ckey); mt != nil {
  1885  		return mt
  1886  	}
  1887  
  1888  	// Look in known types.
  1889  	s := "map[" + ktyp.String() + "]" + etyp.String()
  1890  	for _, tt := range typesByString(s) {
  1891  		mt := (*mapType)(unsafe.Pointer(tt))
  1892  		if mt.key == ktyp && mt.elem == etyp {
  1893  			return cachePut(ckey, tt)
  1894  		}
  1895  	}
  1896  
  1897  	// Make a map type.
  1898  	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
  1899  	mt := new(mapType)
  1900  	*mt = **(**mapType)(unsafe.Pointer(&imap))
  1901  	mt.str = resolveReflectName(newName(s, "", "", false))
  1902  	mt.tflag = 0
  1903  	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
  1904  	mt.key = ktyp
  1905  	mt.elem = etyp
  1906  	mt.bucket = bucketOf(ktyp, etyp)
  1907  	if ktyp.size > maxKeySize {
  1908  		mt.keysize = uint8(ptrSize)
  1909  		mt.indirectkey = 1
  1910  	} else {
  1911  		mt.keysize = uint8(ktyp.size)
  1912  		mt.indirectkey = 0
  1913  	}
  1914  	if etyp.size > maxValSize {
  1915  		mt.valuesize = uint8(ptrSize)
  1916  		mt.indirectvalue = 1
  1917  	} else {
  1918  		mt.valuesize = uint8(etyp.size)
  1919  		mt.indirectvalue = 0
  1920  	}
  1921  	mt.bucketsize = uint16(mt.bucket.size)
  1922  	mt.reflexivekey = isReflexive(ktyp)
  1923  	mt.needkeyupdate = needKeyUpdate(ktyp)
  1924  	mt.ptrToThis = 0
  1925  
  1926  	return cachePut(ckey, &mt.rtype)
  1927  }
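
        // For example:
        //
        //	MapOf(TypeOf(""), TypeOf(0)).String() // "map[string]int"
        //	MapOf(TypeOf([]int(nil)), TypeOf(0))  // panics: []int is not a valid map key type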
  1928  
  1929  type funcTypeFixed4 struct {
  1930  	funcType
  1931  	args [4]*rtype
  1932  }
  1933  type funcTypeFixed8 struct {
  1934  	funcType
  1935  	args [8]*rtype
  1936  }
  1937  type funcTypeFixed16 struct {
  1938  	funcType
  1939  	args [16]*rtype
  1940  }
  1941  type funcTypeFixed32 struct {
  1942  	funcType
  1943  	args [32]*rtype
  1944  }
  1945  type funcTypeFixed64 struct {
  1946  	funcType
  1947  	args [64]*rtype
  1948  }
  1949  type funcTypeFixed128 struct {
  1950  	funcType
  1951  	args [128]*rtype
  1952  }
  1953  
  1954  // FuncOf returns the function type with the given argument and result types.
  1955  // For example, if k represents int and e represents string,
  1956  // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
  1957  //
  1958  // The variadic argument controls whether the function is variadic.
  1959  // FuncOf panics if variadic is true and in[len(in)-1] does not
  1960  // represent a slice.
  1961  func FuncOf(in, out []Type, variadic bool) Type {
  1962  	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
  1963  		panic("reflect.FuncOf: last arg of variadic func must be slice")
  1964  	}
  1965  
  1966  	// Make a func type.
  1967  	var ifunc interface{} = (func())(nil)
  1968  	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
  1969  	n := len(in) + len(out)
  1970  
  1971  	var ft *funcType
  1972  	var args []*rtype
  1973  	switch {
  1974  	case n <= 4:
  1975  		fixed := new(funcTypeFixed4)
  1976  		args = fixed.args[:0:len(fixed.args)]
  1977  		ft = &fixed.funcType
  1978  	case n <= 8:
  1979  		fixed := new(funcTypeFixed8)
  1980  		args = fixed.args[:0:len(fixed.args)]
  1981  		ft = &fixed.funcType
  1982  	case n <= 16:
  1983  		fixed := new(funcTypeFixed16)
  1984  		args = fixed.args[:0:len(fixed.args)]
  1985  		ft = &fixed.funcType
  1986  	case n <= 32:
  1987  		fixed := new(funcTypeFixed32)
  1988  		args = fixed.args[:0:len(fixed.args)]
  1989  		ft = &fixed.funcType
  1990  	case n <= 64:
  1991  		fixed := new(funcTypeFixed64)
  1992  		args = fixed.args[:0:len(fixed.args)]
  1993  		ft = &fixed.funcType
  1994  	case n <= 128:
  1995  		fixed := new(funcTypeFixed128)
  1996  		args = fixed.args[:0:len(fixed.args)]
  1997  		ft = &fixed.funcType
  1998  	default:
  1999  		panic("reflect.FuncOf: too many arguments")
  2000  	}
  2001  	*ft = *prototype
  2002  
  2003  	// Build a hash and minimally populate ft.
  2004  	var hash uint32
  2005  	for _, in := range in {
  2006  		t := in.(*rtype)
  2007  		args = append(args, t)
  2008  		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
  2009  	}
  2010  	if variadic {
  2011  		hash = fnv1(hash, 'v')
  2012  	}
  2013  	hash = fnv1(hash, '.')
  2014  	for _, out := range out {
  2015  		t := out.(*rtype)
  2016  		args = append(args, t)
  2017  		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
  2018  	}
  2019  	if len(args) > 50 {
  2020  		panic("reflect.FuncOf does not support more than 50 arguments")
  2021  	}
  2022  	ft.tflag = 0
  2023  	ft.hash = hash
  2024  	ft.inCount = uint16(len(in))
  2025  	ft.outCount = uint16(len(out))
  2026  	if variadic {
  2027  		ft.outCount |= 1 << 15
  2028  	}
  2029  
  2030  	// Look in cache.
  2031  	funcLookupCache.RLock()
  2032  	for _, t := range funcLookupCache.m[hash] {
  2033  		if haveIdenticalUnderlyingType(&ft.rtype, t) {
  2034  			funcLookupCache.RUnlock()
  2035  			return t
  2036  		}
  2037  	}
  2038  	funcLookupCache.RUnlock()
  2039  
  2040  	// Not in cache, lock and retry.
  2041  	funcLookupCache.Lock()
  2042  	defer funcLookupCache.Unlock()
  2043  	if funcLookupCache.m == nil {
  2044  		funcLookupCache.m = make(map[uint32][]*rtype)
  2045  	}
  2046  	for _, t := range funcLookupCache.m[hash] {
  2047  		if haveIdenticalUnderlyingType(&ft.rtype, t) {
  2048  			return t
  2049  		}
  2050  	}
  2051  
  2052  	// Look in known types for the same string representation.
  2053  	str := funcStr(ft)
  2054  	for _, tt := range typesByString(str) {
  2055  		if haveIdenticalUnderlyingType(&ft.rtype, tt) {
  2056  			funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt)
  2057  			return tt
  2058  		}
  2059  	}
  2060  
  2061  	// Populate the remaining fields of ft and store in cache.
  2062  	ft.str = resolveReflectName(newName(str, "", "", false))
  2063  	ft.ptrToThis = 0
  2064  	funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
  2065  
  2066  	return &ft.rtype
  2067  }
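
        // For example, a variadic signature can be built as
        //
        //	in := []Type{TypeOf(""), TypeOf([]int(nil))}
        //	out := []Type{TypeOf(false)}
        //	f := FuncOf(in, out, true)
        //	// f.String() == "func(string, ...int) bool"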
  2068  
  2069  // funcStr builds a string representation of a funcType.
  2070  func funcStr(ft *funcType) string {
  2071  	repr := make([]byte, 0, 64)
  2072  	repr = append(repr, "func("...)
  2073  	for i, t := range ft.in() {
  2074  		if i > 0 {
  2075  			repr = append(repr, ", "...)
  2076  		}
  2077  		if ft.IsVariadic() && i == int(ft.inCount)-1 {
  2078  			repr = append(repr, "..."...)
  2079  			repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
  2080  		} else {
  2081  			repr = append(repr, t.String()...)
  2082  		}
  2083  	}
  2084  	repr = append(repr, ')')
  2085  	out := ft.out()
  2086  	if len(out) == 1 {
  2087  		repr = append(repr, ' ')
  2088  	} else if len(out) > 1 {
  2089  		repr = append(repr, " ("...)
  2090  	}
  2091  	for i, t := range out {
  2092  		if i > 0 {
  2093  			repr = append(repr, ", "...)
  2094  		}
  2095  		repr = append(repr, t.String()...)
  2096  	}
  2097  	if len(out) > 1 {
  2098  		repr = append(repr, ')')
  2099  	}
  2100  	return string(repr)
  2101  }
  2102  
  2103  // isReflexive reports whether the == operation on the type is reflexive.
  2104  // That is, x == x for all values x of type t.
  2105  func isReflexive(t *rtype) bool {
  2106  	switch t.Kind() {
  2107  	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
  2108  		return true
  2109  	case Float32, Float64, Complex64, Complex128, Interface:
  2110  		return false
  2111  	case Array:
  2112  		tt := (*arrayType)(unsafe.Pointer(t))
  2113  		return isReflexive(tt.elem)
  2114  	case Struct:
  2115  		tt := (*structType)(unsafe.Pointer(t))
  2116  		for _, f := range tt.fields {
  2117  			if !isReflexive(f.typ) {
  2118  				return false
  2119  			}
  2120  		}
  2121  		return true
  2122  	default:
  2123  		// Func, Map, Slice, Invalid
  2124  		panic("isReflexive called on non-key type " + t.String())
  2125  	}
  2126  }
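
        // For example, float64 is not a reflexive key type because NaN != NaN,
        // and the same holds for
        //
        //	type K struct{ F float64 }
        //
        // since the struct walk above reaches the float64 field.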
  2127  
  2128  // needKeyUpdate reports whether map overwrites require the key to be copied.
  2129  func needKeyUpdate(t *rtype) bool {
  2130  	switch t.Kind() {
  2131  	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
  2132  		return false
  2133  	case Float32, Float64, Complex64, Complex128, Interface, String:
  2134  		// Float keys can be updated from +0 to -0.
  2135  		// String keys can be updated to use a smaller backing store.
  2136  	// Interfaces might have floats or strings in them.
  2137  		return true
  2138  	case Array:
  2139  		tt := (*arrayType)(unsafe.Pointer(t))
  2140  		return needKeyUpdate(tt.elem)
  2141  	case Struct:
  2142  		tt := (*structType)(unsafe.Pointer(t))
  2143  		for _, f := range tt.fields {
  2144  			if needKeyUpdate(f.typ) {
  2145  				return true
  2146  			}
  2147  		}
  2148  		return false
  2149  	default:
  2150  		// Func, Map, Slice, Invalid
  2151  		panic("needKeyUpdate called on non-key type " + t.String())
  2152  	}
  2153  }
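
        // For example, for a map m of type map[float64]int that already holds the
        // key +0.0, the assignment m[-0.0] = 1 finds the existing entry (the keys
        // compare equal), and because needKeyUpdate reports true for Float64 the
        // stored key is overwritten with -0.0.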
  2154  
  2155  // Make sure these routines stay in sync with ../../runtime/hashmap.go!
  2156  // These types exist only for GC, so we only fill out GC relevant info.
  2157  // Currently, that's just size and the GC program. We also fill in string
  2158  // for possible debugging use.
  2159  const (
  2160  	bucketSize uintptr = 8
  2161  	maxKeySize uintptr = 128
  2162  	maxValSize uintptr = 128
  2163  )
  2164  
  2165  func bucketOf(ktyp, etyp *rtype) *rtype {
  2166  	// See comment on hmap.overflow in ../runtime/hashmap.go.
  2167  	var kind uint8
  2168  	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
  2169  		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
  2170  		kind = kindNoPointers
  2171  	}
  2172  
  2173  	if ktyp.size > maxKeySize {
  2174  		ktyp = PtrTo(ktyp).(*rtype)
  2175  	}
  2176  	if etyp.size > maxValSize {
  2177  		etyp = PtrTo(etyp).(*rtype)
  2178  	}
  2179  
  2180  	// Prepare GC data if any.
  2181  	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
  2182  	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
  2183  	// Normally the enforced limit on pointer maps is 16 bytes,
  2184  	// but larger ones are acceptable, 33 bytes isn't too big,
  2185  	// and it's easier to generate a pointer bitmap than a GC program.
  2186  	// Note that since the key and value are known to be <= 128 bytes,
  2187  	// they're guaranteed to have bitmaps instead of GC programs.
  2188  	var gcdata *byte
  2189  	var ptrdata uintptr
  2190  	var overflowPad uintptr
  2191  
  2192  	// On NaCl, pad if needed to make overflow end at the proper struct alignment.
  2193  	// On other systems, align > ptrSize is not possible.
  2194  	if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
  2195  		overflowPad = ptrSize
  2196  	}
  2197  	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
  2198  	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
  2199  		panic("reflect: bad size computation in MapOf")
  2200  	}
  2201  
  2202  	if kind != kindNoPointers {
  2203  		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
  2204  		mask := make([]byte, (nptr+7)/8)
  2205  		base := bucketSize / ptrSize
  2206  
  2207  		if ktyp.kind&kindNoPointers == 0 {
  2208  			if ktyp.kind&kindGCProg != 0 {
  2209  				panic("reflect: unexpected GC program in MapOf")
  2210  			}
  2211  			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
  2212  			for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
  2213  				if (kmask[i/8]>>(i%8))&1 != 0 {
  2214  					for j := uintptr(0); j < bucketSize; j++ {
  2215  						word := base + j*ktyp.size/ptrSize + i
  2216  						mask[word/8] |= 1 << (word % 8)
  2217  					}
  2218  				}
  2219  			}
  2220  		}
  2221  		base += bucketSize * ktyp.size / ptrSize
  2222  
  2223  		if etyp.kind&kindNoPointers == 0 {
  2224  			if etyp.kind&kindGCProg != 0 {
  2225  				panic("reflect: unexpected GC program in MapOf")
  2226  			}
  2227  			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
  2228  			for i := uintptr(0); i < etyp.size/ptrSize; i++ {
  2229  				if (emask[i/8]>>(i%8))&1 != 0 {
  2230  					for j := uintptr(0); j < bucketSize; j++ {
  2231  						word := base + j*etyp.size/ptrSize + i
  2232  						mask[word/8] |= 1 << (word % 8)
  2233  					}
  2234  				}
  2235  			}
  2236  		}
  2237  		base += bucketSize * etyp.size / ptrSize
  2238  		base += overflowPad / ptrSize
  2239  
  2240  		word := base
  2241  		mask[word/8] |= 1 << (word % 8)
  2242  		gcdata = &mask[0]
  2243  		ptrdata = (word + 1) * ptrSize
  2244  
  2245  		// overflow word must be last
  2246  		if ptrdata != size {
  2247  			panic("reflect: bad layout computation in MapOf")
  2248  		}
  2249  	}
  2250  
  2251  	b := new(rtype)
  2252  	b.align = ptrSize
  2253  	if overflowPad > 0 {
  2254  		b.align = 8
  2255  	}
  2256  	b.size = size
  2257  	b.ptrdata = ptrdata
  2258  	b.kind = kind
  2259  	b.gcdata = gcdata
  2260  	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
  2261  	b.str = resolveReflectName(newName(s, "", "", false))
  2262  	return b
  2263  }
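
        // For example, for map[string]int on a 64-bit system a bucket holds
        // 8 tophash bytes, 8 string keys (16 bytes each), 8 int values
        // (8 bytes each), and one overflow pointer:
        //
        //	size = 8*(1+16+8) + 0 + 8 = 208 bytes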
  2264  
  2265  // SliceOf returns the slice type with element type t.
  2266  // For example, if t represents int, SliceOf(t) represents []int.
  2267  func SliceOf(t Type) Type {
  2268  	typ := t.(*rtype)
  2269  
  2270  	// Look in cache.
  2271  	ckey := cacheKey{Slice, typ, nil, 0}
  2272  	if slice := cacheGet(ckey); slice != nil {
  2273  		return slice
  2274  	}
  2275  
  2276  	// Look in known types.
  2277  	s := "[]" + typ.String()
  2278  	for _, tt := range typesByString(s) {
  2279  		slice := (*sliceType)(unsafe.Pointer(tt))
  2280  		if slice.elem == typ {
  2281  			return cachePut(ckey, tt)
  2282  		}
  2283  	}
  2284  
  2285  	// Make a slice type.
  2286  	var islice interface{} = ([]unsafe.Pointer)(nil)
  2287  	prototype := *(**sliceType)(unsafe.Pointer(&islice))
  2288  	slice := new(sliceType)
  2289  	*slice = *prototype
  2290  	slice.tflag = 0
  2291  	slice.str = resolveReflectName(newName(s, "", "", false))
  2292  	slice.hash = fnv1(typ.hash, '[')
  2293  	slice.elem = typ
  2294  	slice.ptrToThis = 0
  2295  
  2296  	return cachePut(ckey, &slice.rtype)
  2297  }
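
        // For example,
        //
        //	SliceOf(TypeOf(byte(0))).String() // "[]uint8"
        //
        // and, because results are cached, repeated calls with the same element
        // type return the identical Type value.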
  2298  
  2299  // The structLookupCache caches StructOf lookups.
  2300  // StructOf does not share the common lookupCache since we need to pin
  2301  // the memory associated with *structTypeFixedN.
  2302  var structLookupCache struct {
  2303  	sync.RWMutex
  2304  	m map[uint32][]interface {
  2305  		common() *rtype
  2306  	} // keyed by hash calculated in StructOf
  2307  }
  2308  
  2309  type structTypeUncommon struct {
  2310  	structType
  2311  	u uncommonType
  2312  }
  2313  
  2314  // A *rtype representing a struct is followed directly in memory by an
  2315  // array of method objects representing the methods attached to the
  2316  // struct. To get the same layout for a run time generated type, we
  2317  // need an array directly following the uncommonType memory. The types
  2318  // structTypeFixed4, ...structTypeFixedN are used to do this.
  2319  //
  2320  // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
  2321  
  2322  // TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
  2323  // have no methods, they could be defined at runtime using the StructOf
  2324  // function.
  2325  
  2326  type structTypeFixed4 struct {
  2327  	structType
  2328  	u uncommonType
  2329  	m [4]method
  2330  }
  2331  
  2332  type structTypeFixed8 struct {
  2333  	structType
  2334  	u uncommonType
  2335  	m [8]method
  2336  }
  2337  
  2338  type structTypeFixed16 struct {
  2339  	structType
  2340  	u uncommonType
  2341  	m [16]method
  2342  }
  2343  
  2344  type structTypeFixed32 struct {
  2345  	structType
  2346  	u uncommonType
  2347  	m [32]method
  2348  }
  2349  
  2350  // StructOf returns the struct type containing fields.
  2351  // The Offset and Index fields are ignored and computed as they would be
  2352  // by the compiler.
  2353  //
  2354  // StructOf currently does not generate wrapper methods for embedded fields.
  2355  // This limitation may be lifted in a future version.
  2356  func StructOf(fields []StructField) Type {
  2357  	var (
  2358  		hash       = fnv1(0, []byte("struct {")...)
  2359  		size       uintptr
  2360  		typalign   uint8
  2361  		comparable = true
  2362  		hashable   = true
  2363  		methods    []method
  2364  
  2365  		fs   = make([]structField, len(fields))
  2366  		repr = make([]byte, 0, 64)
  2367  		fset = map[string]struct{}{} // fields' names
  2368  
  2369  		hasPtr    = false // records whether at least one struct field contains pointer data
  2370  		hasGCProg = false // records whether a struct-field type has a GCProg
  2371  	)
  2372  
  2373  	repr = append(repr, "struct {"...)
  2374  	for i, field := range fields {
  2375  		if field.Type == nil {
  2376  			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
  2377  		}
  2378  		f := runtimeStructField(field)
  2379  		ft := f.typ
  2380  		if ft.kind&kindGCProg != 0 {
  2381  			hasGCProg = true
  2382  		}
  2383  		if ft.pointers() {
  2384  			hasPtr = true
  2385  		}
  2386  
  2387  		name := ""
  2388  		// Update string and hash
  2389  		if f.name.nameLen() > 0 {
  2390  			hash = fnv1(hash, []byte(f.name.name())...)
  2391  			repr = append(repr, (" " + f.name.name())...)
  2392  			name = f.name.name()
  2393  		} else {
  2394  			// Embedded field
  2395  			if f.typ.Kind() == Ptr {
  2396  				// Embedded ** and *interface{} are illegal
  2397  				elem := ft.Elem()
  2398  				if k := elem.Kind(); k == Ptr || k == Interface {
  2399  					panic("reflect.StructOf: illegal anonymous field type " + ft.String())
  2400  				}
  2401  				name = elem.String()
  2402  			} else {
  2403  				name = ft.String()
  2404  			}
  2405  			// TODO(sbinet) check for syntactically impossible type names?
  2406  
  2407  			switch f.typ.Kind() {
  2408  			case Interface:
  2409  				ift := (*interfaceType)(unsafe.Pointer(ft))
  2410  				for im, m := range ift.methods {
  2411  					if ift.nameOff(m.name).pkgPath() != "" {
  2412  						// TODO(sbinet)
  2413  						panic("reflect: embedded interface with unexported method(s) not implemented")
  2414  					}
  2415  
  2416  					var (
  2417  						mtyp    = ift.typeOff(m.typ)
  2418  						ifield  = i
  2419  						imethod = im
  2420  						ifn     Value
  2421  						tfn     Value
  2422  					)
  2423  
  2424  					if ft.kind&kindDirectIface != 0 {
  2425  						tfn = MakeFunc(mtyp, func(in []Value) []Value {
  2426  							var args []Value
  2427  							var recv = in[0]
  2428  							if len(in) > 1 {
  2429  								args = in[1:]
  2430  							}
  2431  							return recv.Field(ifield).Method(imethod).Call(args)
  2432  						})
  2433  						ifn = MakeFunc(mtyp, func(in []Value) []Value {
  2434  							var args []Value
  2435  							var recv = in[0]
  2436  							if len(in) > 1 {
  2437  								args = in[1:]
  2438  							}
  2439  							return recv.Field(ifield).Method(imethod).Call(args)
  2440  						})
  2441  					} else {
  2442  						tfn = MakeFunc(mtyp, func(in []Value) []Value {
  2443  							var args []Value
  2444  							var recv = in[0]
  2445  							if len(in) > 1 {
  2446  								args = in[1:]
  2447  							}
  2448  							return recv.Field(ifield).Method(imethod).Call(args)
  2449  						})
  2450  						ifn = MakeFunc(mtyp, func(in []Value) []Value {
  2451  							var args []Value
  2452  							var recv = Indirect(in[0])
  2453  							if len(in) > 1 {
  2454  								args = in[1:]
  2455  							}
  2456  							return recv.Field(ifield).Method(imethod).Call(args)
  2457  						})
  2458  					}
  2459  
  2460  					methods = append(methods, method{
  2461  						name: resolveReflectName(ift.nameOff(m.name)),
  2462  						mtyp: resolveReflectType(mtyp),
  2463  						ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
  2464  						tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
  2465  					})
  2466  				}
  2467  			case Ptr:
  2468  				ptr := (*ptrType)(unsafe.Pointer(ft))
  2469  				if unt := ptr.uncommon(); unt != nil {
  2470  					for _, m := range unt.methods() {
  2471  						mname := ptr.nameOff(m.name)
  2472  						if mname.pkgPath() != "" {
  2473  							// TODO(sbinet)
  2474  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2475  						}
  2476  						methods = append(methods, method{
  2477  							name: resolveReflectName(mname),
  2478  							mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
  2479  							ifn:  resolveReflectText(ptr.textOff(m.ifn)),
  2480  							tfn:  resolveReflectText(ptr.textOff(m.tfn)),
  2481  						})
  2482  					}
  2483  				}
  2484  				if unt := ptr.elem.uncommon(); unt != nil {
  2485  					for _, m := range unt.methods() {
  2486  						mname := ptr.nameOff(m.name)
  2487  						if mname.pkgPath() != "" {
  2488  							// TODO(sbinet)
  2489  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2490  						}
  2491  						methods = append(methods, method{
  2492  							name: resolveReflectName(mname),
  2493  							mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
  2494  							ifn:  resolveReflectText(ptr.elem.textOff(m.ifn)),
  2495  							tfn:  resolveReflectText(ptr.elem.textOff(m.tfn)),
  2496  						})
  2497  					}
  2498  				}
  2499  			default:
  2500  				if unt := ft.uncommon(); unt != nil {
  2501  					for _, m := range unt.methods() {
  2502  						mname := ft.nameOff(m.name)
  2503  						if mname.pkgPath() != "" {
  2504  							// TODO(sbinet)
  2505  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2506  						}
  2507  						methods = append(methods, method{
  2508  							name: resolveReflectName(mname),
  2509  							mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
  2510  							ifn:  resolveReflectText(ft.textOff(m.ifn)),
  2511  							tfn:  resolveReflectText(ft.textOff(m.tfn)),
  2512  						})
  2513  
  2514  					}
  2515  				}
  2516  			}
  2517  		}
  2518  		if _, dup := fset[name]; dup {
  2519  			panic("reflect.StructOf: duplicate field " + name)
  2520  		}
  2521  		fset[name] = struct{}{}
  2522  
  2523  		hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
  2524  
  2525  		repr = append(repr, (" " + ft.String())...)
  2526  		if f.name.tagLen() > 0 {
  2527  			hash = fnv1(hash, []byte(f.name.tag())...)
  2528  			repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
  2529  		}
  2530  		if i < len(fields)-1 {
  2531  			repr = append(repr, ';')
  2532  		}
  2533  
  2534  		comparable = comparable && (ft.alg.equal != nil)
  2535  		hashable = hashable && (ft.alg.hash != nil)
  2536  
  2537  		f.offset = align(size, uintptr(ft.align))
  2538  		if ft.align > typalign {
  2539  			typalign = ft.align
  2540  		}
  2541  		size = f.offset + ft.size
  2542  
  2543  		fs[i] = f
  2544  	}
  2545  
  2546  	var typ *structType
  2547  	var ut *uncommonType
  2548  	var typPin interface {
  2549  		common() *rtype
  2550  	} // structTypeFixedN
  2551  
  2552  	switch {
  2553  	case len(methods) == 0:
  2554  		t := new(structTypeUncommon)
  2555  		typ = &t.structType
  2556  		ut = &t.u
  2557  		typPin = t
  2558  	case len(methods) <= 4:
  2559  		t := new(structTypeFixed4)
  2560  		typ = &t.structType
  2561  		ut = &t.u
  2562  		copy(t.m[:], methods)
  2563  		typPin = t
  2564  	case len(methods) <= 8:
  2565  		t := new(structTypeFixed8)
  2566  		typ = &t.structType
  2567  		ut = &t.u
  2568  		copy(t.m[:], methods)
  2569  		typPin = t
  2570  	case len(methods) <= 16:
  2571  		t := new(structTypeFixed16)
  2572  		typ = &t.structType
  2573  		ut = &t.u
  2574  		copy(t.m[:], methods)
  2575  		typPin = t
  2576  	case len(methods) <= 32:
  2577  		t := new(structTypeFixed32)
  2578  		typ = &t.structType
  2579  		ut = &t.u
  2580  		copy(t.m[:], methods)
  2581  		typPin = t
  2582  	default:
  2583  		panic("reflect.StructOf: too many methods")
  2584  	}
  2585  	ut.mcount = uint16(len(methods))
  2586  	ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
  2587  
  2588  	if len(fs) > 0 {
  2589  		repr = append(repr, ' ')
  2590  	}
  2591  	repr = append(repr, '}')
  2592  	hash = fnv1(hash, '}')
  2593  	str := string(repr)
  2594  
  2595  	// Round the size up to be a multiple of the alignment.
  2596  	size = align(size, uintptr(typalign))
  2597  
  2598  	// Make the struct type.
  2599  	var istruct interface{} = struct{}{}
  2600  	prototype := *(**structType)(unsafe.Pointer(&istruct))
  2601  	*typ = *prototype
  2602  	typ.fields = fs
  2603  
  2604  	// Look in cache
  2605  	structLookupCache.RLock()
  2606  	for _, st := range structLookupCache.m[hash] {
  2607  		t := st.common()
  2608  		if haveIdenticalUnderlyingType(&typ.rtype, t) {
  2609  			structLookupCache.RUnlock()
  2610  			return t
  2611  		}
  2612  	}
  2613  	structLookupCache.RUnlock()
  2614  
  2615  	// not in cache, lock and retry
  2616  	structLookupCache.Lock()
  2617  	defer structLookupCache.Unlock()
  2618  	if structLookupCache.m == nil {
  2619  		structLookupCache.m = make(map[uint32][]interface {
  2620  			common() *rtype
  2621  		})
  2622  	}
  2623  	for _, st := range structLookupCache.m[hash] {
  2624  		t := st.common()
  2625  		if haveIdenticalUnderlyingType(&typ.rtype, t) {
  2626  			return t
  2627  		}
  2628  	}
  2629  
  2630  	// Look in known types.
  2631  	for _, t := range typesByString(str) {
  2632  		if haveIdenticalUnderlyingType(&typ.rtype, t) {
  2633  			// even if 't' wasn't a structType with methods, we should be ok
  2634  			// as the 'u uncommonType' field won't be accessed except when
  2635  			// tflag&tflagUncommon is set.
  2636  			structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
  2637  			return t
  2638  		}
  2639  	}
  2640  
  2641  	typ.str = resolveReflectName(newName(str, "", "", false))
  2642  	typ.tflag = 0
  2643  	typ.hash = hash
  2644  	typ.size = size
  2645  	typ.align = typalign
  2646  	typ.fieldAlign = typalign
  2647  	if len(methods) > 0 {
  2648  		typ.tflag |= tflagUncommon
  2649  	}
  2650  	if !hasPtr {
  2651  		typ.kind |= kindNoPointers
  2652  	} else {
  2653  		typ.kind &^= kindNoPointers
  2654  	}
  2655  
  2656  	if hasGCProg {
  2657  		lastPtrField := 0
  2658  		for i, ft := range fs {
  2659  			if ft.typ.pointers() {
  2660  				lastPtrField = i
  2661  			}
  2662  		}
  2663  		prog := []byte{0, 0, 0, 0} // will be length of prog
  2664  		for i, ft := range fs {
  2665  			if i > lastPtrField {
  2666  				// gcprog should not include anything for any field after
  2667  				// the last field that contains pointer data
  2668  				break
  2669  			}
  2670  			// FIXME(sbinet) handle padding, fields smaller than a word
  2671  			elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
  2672  			elemPtrs := ft.typ.ptrdata / ptrSize
  2673  			switch {
  2674  			case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
  2675  				// Element is small with pointer mask; use as literal bits.
  2676  				mask := elemGC
  2677  				// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
  2678  				var n uintptr
  2679  				for n = elemPtrs; n > 120; n -= 120 {
  2680  					prog = append(prog, 120)
  2681  					prog = append(prog, mask[:15]...)
  2682  					mask = mask[15:]
  2683  				}
  2684  				prog = append(prog, byte(n))
  2685  				prog = append(prog, mask[:(n+7)/8]...)
  2686  			case ft.typ.kind&kindGCProg != 0:
  2687  				// Element has GC program; emit one element.
  2688  				elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
  2689  				prog = append(prog, elemProg...)
  2690  			}
  2691  			// Pad from ptrdata to size.
  2692  			elemWords := ft.typ.size / ptrSize
  2693  			if elemPtrs < elemWords {
  2694  				// Emit literal 0 bit, then repeat as needed.
  2695  				prog = append(prog, 0x01, 0x00)
  2696  				if elemPtrs+1 < elemWords {
  2697  					prog = append(prog, 0x81)
  2698  					prog = appendVarint(prog, elemWords-elemPtrs-1)
  2699  				}
  2700  			}
  2701  		}
  2702  		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
  2703  		typ.kind |= kindGCProg
  2704  		typ.gcdata = &prog[0]
  2705  	} else {
  2706  		typ.kind &^= kindGCProg
  2707  		bv := new(bitVector)
  2708  		addTypeBits(bv, 0, typ.common())
  2709  		if len(bv.data) > 0 {
  2710  			typ.gcdata = &bv.data[0]
  2711  		}
  2712  	}
  2713  	typ.ptrdata = typeptrdata(typ.common())
  2714  	typ.alg = new(typeAlg)
  2715  	if hashable {
  2716  		typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
  2717  			o := seed
  2718  			for _, ft := range typ.fields {
  2719  				pi := unsafe.Pointer(uintptr(p) + ft.offset)
  2720  				o = ft.typ.alg.hash(pi, o)
  2721  			}
  2722  			return o
  2723  		}
  2724  	}
  2725  
  2726  	if comparable {
  2727  		typ.alg.equal = func(p, q unsafe.Pointer) bool {
  2728  			for _, ft := range typ.fields {
  2729  				pi := unsafe.Pointer(uintptr(p) + ft.offset)
  2730  				qi := unsafe.Pointer(uintptr(q) + ft.offset)
  2731  				if !ft.typ.alg.equal(pi, qi) {
  2732  					return false
  2733  				}
  2734  			}
  2735  			return true
  2736  		}
  2737  	}
  2738  
  2739  	switch {
  2740  	case len(fs) == 1 && !ifaceIndir(fs[0].typ):
  2741  		// structs of 1 direct iface type can be direct
  2742  		typ.kind |= kindDirectIface
  2743  	default:
  2744  		typ.kind &^= kindDirectIface
  2745  	}
  2746  
  2747  	structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
  2748  	return &typ.rtype
  2749  }
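
        // For example, a struct type equivalent to
        //
        //	struct {
        //		Height float64
        //		Age    int
        //	}
        //
        // can be built and used at run time:
        //
        //	t := StructOf([]StructField{
        //		{Name: "Height", Type: TypeOf(float64(0))},
        //		{Name: "Age", Type: TypeOf(int(0))},
        //	})
        //	v := New(t).Elem()
        //	v.Field(0).SetFloat(1.85)
        //	v.Field(1).SetInt(92)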
  2750  
  2751  func runtimeStructField(field StructField) structField {
  2752  	exported := field.PkgPath == ""
  2753  	if field.Name == "" {
  2754  		t := field.Type.(*rtype)
  2755  		if t.Kind() == Ptr {
  2756  			t = t.Elem().(*rtype)
  2757  		}
  2758  		exported = t.nameOff(t.str).isExported()
  2759  	} else if exported {
  2760  		b0 := field.Name[0]
  2761  		if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
  2762  			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
  2763  		}
  2764  	}
  2765  
  2766  	_ = resolveReflectType(field.Type.common())
  2767  	return structField{
  2768  		name:   newName(field.Name, string(field.Tag), field.PkgPath, exported),
  2769  		typ:    field.Type.common(),
  2770  		offset: 0,
  2771  	}
  2772  }
  2773  
  2774  // typeptrdata returns the length in bytes of the prefix of t
  2775  // containing pointer data. Anything after this offset is scalar data.
  2776  // keep in sync with ../cmd/compile/internal/gc/reflect.go
  2777  func typeptrdata(t *rtype) uintptr {
  2778  	if !t.pointers() {
  2779  		return 0
  2780  	}
  2781  	switch t.Kind() {
  2782  	case Struct:
  2783  		st := (*structType)(unsafe.Pointer(t))
  2784  		// find the last field that has pointers.
  2785  		field := 0
  2786  		for i := range st.fields {
  2787  			ft := st.fields[i].typ
  2788  			if ft.pointers() {
  2789  				field = i
  2790  			}
  2791  		}
  2792  		f := st.fields[field]
  2793  		return f.offset + f.typ.ptrdata
  2794  
  2795  	default:
  2796  		panic("reflect.typeptrdata: unexpected type, " + t.String())
  2797  	}
  2798  }
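
        // For example, on a 64-bit system typeptrdata returns 8 for
        //
        //	struct {
        //		p *int
        //		x [4]uint64
        //	}
        //
        // because the last (and only) pointer-containing field is p at offset 0
        // and its ptrdata is one word.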
  2799  
  2800  // See cmd/compile/internal/gc/reflect.go for derivation of constant.
  2801  const maxPtrmaskBytes = 2048
  2802  
  2803  // ArrayOf returns the array type with the given count and element type.
  2804  // For example, if t represents int, ArrayOf(5, t) represents [5]int.
  2805  //
  2806  // If the resulting type would be larger than the available address space,
  2807  // ArrayOf panics.
  2808  func ArrayOf(count int, elem Type) Type {
  2809  	typ := elem.(*rtype)
  2810  	// call SliceOf here as it calls cacheGet/cachePut.
  2811  	// ArrayOf also calls cacheGet/cachePut; on a miss, cacheGet leaves lookupCache
  2812  	// locked, so calling SliceOf any later would deadlock on the lookupCache mutex.
  2813  	slice := SliceOf(elem)
  2814  
  2815  	// Look in cache.
  2816  	ckey := cacheKey{Array, typ, nil, uintptr(count)}
  2817  	if array := cacheGet(ckey); array != nil {
  2818  		return array
  2819  	}
  2820  
  2821  	// Look in known types.
  2822  	s := "[" + strconv.Itoa(count) + "]" + typ.String()
  2823  	for _, tt := range typesByString(s) {
  2824  		array := (*arrayType)(unsafe.Pointer(tt))
  2825  		if array.elem == typ {
  2826  			return cachePut(ckey, tt)
  2827  		}
  2828  	}
  2829  
  2830  	// Make an array type.
  2831  	var iarray interface{} = [1]unsafe.Pointer{}
  2832  	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
  2833  	array := new(arrayType)
  2834  	*array = *prototype
  2835  	array.str = resolveReflectName(newName(s, "", "", false))
  2836  	array.hash = fnv1(typ.hash, '[')
  2837  	for n := uint32(count); n > 0; n >>= 8 {
  2838  		array.hash = fnv1(array.hash, byte(n))
  2839  	}
  2840  	array.hash = fnv1(array.hash, ']')
  2841  	array.elem = typ
  2842  	array.ptrToThis = 0
  2843  	// A zero-size element cannot overflow the address space.
  2844  	if typ.size > 0 && uintptr(count) > ^uintptr(0)/typ.size {
  2845  		panic("reflect.ArrayOf: array size would exceed virtual address space")
  2846  	}
  2847  	array.size = typ.size * uintptr(count)
  2848  	if count > 0 && typ.ptrdata != 0 {
  2849  		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
  2850  	}
  2851  	array.align = typ.align
  2852  	array.fieldAlign = typ.fieldAlign
  2853  	array.len = uintptr(count)
  2854  	array.slice = slice.(*rtype)
  2855  
  2856  	array.kind &^= kindNoPointers
  2857  	switch {
  2858  	case typ.kind&kindNoPointers != 0 || array.size == 0:
  2859  		// No pointers.
  2860  		array.kind |= kindNoPointers
  2861  		array.gcdata = nil
  2862  		array.ptrdata = 0
  2863  
  2864  	case count == 1:
  2865  		// In memory, 1-element array looks just like the element.
  2866  		array.kind |= typ.kind & kindGCProg
  2867  		array.gcdata = typ.gcdata
  2868  		array.ptrdata = typ.ptrdata
  2869  
  2870  	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
  2871  		// Element is small with pointer mask; array is still small.
  2872  		// Create direct pointer mask by turning each 1 bit in elem
  2873  		// into count 1 bits in larger mask.
  2874  		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
  2875  		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
  2876  		elemWords := typ.size / ptrSize
  2877  		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
  2878  			if (elemMask[j/8]>>(j%8))&1 != 0 {
  2879  				for i := uintptr(0); i < array.len; i++ {
  2880  					k := i*elemWords + j
  2881  					mask[k/8] |= 1 << (k % 8)
  2882  				}
  2883  			}
  2884  		}
  2885  		array.gcdata = &mask[0]
  2886  
  2887  	default:
  2888  		// Create program that emits one element
  2889  		// and then repeats to make the array.
  2890  		prog := []byte{0, 0, 0, 0} // will be length of prog
  2891  		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
  2892  		elemPtrs := typ.ptrdata / ptrSize
  2893  		if typ.kind&kindGCProg == 0 {
  2894  			// Element is small with pointer mask; use as literal bits.
  2895  			mask := elemGC
  2896  			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
  2897  			var n uintptr
  2898  			for n = elemPtrs; n > 120; n -= 120 {
  2899  				prog = append(prog, 120)
  2900  				prog = append(prog, mask[:15]...)
  2901  				mask = mask[15:]
  2902  			}
  2903  			prog = append(prog, byte(n))
  2904  			prog = append(prog, mask[:(n+7)/8]...)
  2905  		} else {
  2906  			// Element has GC program; emit one element.
  2907  			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
  2908  			prog = append(prog, elemProg...)
  2909  		}
  2910  		// Pad from ptrdata to size.
  2911  		elemWords := typ.size / ptrSize
  2912  		if elemPtrs < elemWords {
  2913  			// Emit literal 0 bit, then repeat as needed.
  2914  			prog = append(prog, 0x01, 0x00)
  2915  			if elemPtrs+1 < elemWords {
  2916  				prog = append(prog, 0x81)
  2917  				prog = appendVarint(prog, elemWords-elemPtrs-1)
  2918  			}
  2919  		}
  2920  		// Repeat count-1 times.
  2921  		if elemWords < 0x80 {
  2922  			prog = append(prog, byte(elemWords|0x80))
  2923  		} else {
  2924  			prog = append(prog, 0x80)
  2925  			prog = appendVarint(prog, elemWords)
  2926  		}
  2927  		prog = appendVarint(prog, uintptr(count)-1)
  2928  		prog = append(prog, 0)
  2929  		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
  2930  		array.kind |= kindGCProg
  2931  		array.gcdata = &prog[0]
  2932  		array.ptrdata = array.size // overestimate but ok; must match program
  2933  	}
  2934  
  2935  	etyp := typ.common()
  2936  	esize := etyp.Size()
  2937  	ealg := etyp.alg
  2938  
  2939  	array.alg = new(typeAlg)
  2940  	if ealg.equal != nil {
  2941  		eequal := ealg.equal
  2942  		array.alg.equal = func(p, q unsafe.Pointer) bool {
  2943  			for i := 0; i < count; i++ {
  2944  				pi := arrayAt(p, i, esize)
  2945  				qi := arrayAt(q, i, esize)
  2946  				if !eequal(pi, qi) {
  2947  					return false
  2948  				}
  2949  
  2950  			}
  2951  			return true
  2952  		}
  2953  	}
  2954  	if ealg.hash != nil {
  2955  		ehash := ealg.hash
  2956  		array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
  2957  			o := seed
  2958  			for i := 0; i < count; i++ {
  2959  				o = ehash(arrayAt(ptr, i, esize), o)
  2960  			}
  2961  			return o
  2962  		}
  2963  	}
  2964  
  2965  	switch {
  2966  	case count == 1 && !ifaceIndir(typ):
  2967  		// array of 1 direct iface type can be direct
  2968  		array.kind |= kindDirectIface
  2969  	default:
  2970  		array.kind &^= kindDirectIface
  2971  	}
  2972  
  2973  	return cachePut(ckey, &array.rtype)
  2974  }
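
        // For example, on a 64-bit system:
        //
        //	t := ArrayOf(3, TypeOf(""))
        //	// t.String() == "[3]string", t.Size() == 48, t.Elem() == TypeOf("")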
  2975  
  2976  func appendVarint(x []byte, v uintptr) []byte {
  2977  	for ; v >= 0x80; v >>= 7 {
  2978  		x = append(x, byte(v|0x80))
  2979  	}
  2980  	x = append(x, byte(v))
  2981  	return x
  2982  }
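
        // For example, the value 300 is encoded low 7 bits first:
        //
        //	appendVarint(nil, 300) // []byte{0xAC, 0x02}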
  2983  
  2984  // toType converts from a *rtype to a Type that can be returned
  2985  // to the client of package reflect. In gc, the only concern is that
  2986  // a nil *rtype must be replaced by a nil Type, but in gccgo this
  2987  // function takes care of ensuring that multiple *rtype for the same
  2988  // type are coalesced into a single Type.
  2989  func toType(t *rtype) Type {
  2990  	if t == nil {
  2991  		return nil
  2992  	}
  2993  	return t
  2994  }
  2995  
  2996  type layoutKey struct {
  2997  	t    *rtype // function signature
  2998  	rcvr *rtype // receiver type, or nil if none
  2999  }
  3000  
  3001  type layoutType struct {
  3002  	t         *rtype
  3003  	argSize   uintptr // size of arguments
  3004  	retOffset uintptr // offset of return values.
  3005  	stack     *bitVector
  3006  	framePool *sync.Pool
  3007  }
  3008  
  3009  var layoutCache struct {
  3010  	sync.RWMutex
  3011  	m map[layoutKey]layoutType
  3012  }
  3013  
  3014  // funcLayout computes a struct type representing the layout of the
  3015  // function arguments and return values for the function type t.
  3016  // If rcvr != nil, rcvr specifies the type of the receiver.
  3017  // The returned type exists only for GC, so we only fill out GC relevant info.
  3018  // Currently, that's just size and the GC program. We also fill in
  3019  // the name for possible debugging use.
  3020  func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
  3021  	if t.Kind() != Func {
  3022  		panic("reflect: funcLayout of non-func type")
  3023  	}
  3024  	if rcvr != nil && rcvr.Kind() == Interface {
  3025  		panic("reflect: funcLayout with interface receiver " + rcvr.String())
  3026  	}
  3027  	k := layoutKey{t, rcvr}
  3028  	layoutCache.RLock()
  3029  	if x := layoutCache.m[k]; x.t != nil {
  3030  		layoutCache.RUnlock()
  3031  		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
  3032  	}
  3033  	layoutCache.RUnlock()
  3034  	layoutCache.Lock()
  3035  	if x := layoutCache.m[k]; x.t != nil {
  3036  		layoutCache.Unlock()
  3037  		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
  3038  	}
  3039  
  3040  	tt := (*funcType)(unsafe.Pointer(t))
  3041  
  3042  	// compute gc program & stack bitmap for arguments
  3043  	ptrmap := new(bitVector)
  3044  	var offset uintptr
  3045  	if rcvr != nil {
  3046  		// Reflect uses the "interface" calling convention for
  3047  		// methods, where receivers take one word of argument
  3048  		// space no matter how big they actually are.
  3049  		if ifaceIndir(rcvr) || rcvr.pointers() {
  3050  			ptrmap.append(1)
  3051  		}
  3052  		offset += ptrSize
  3053  	}
  3054  	for _, arg := range tt.in() {
  3055  		offset += -offset & uintptr(arg.align-1)
  3056  		addTypeBits(ptrmap, offset, arg)
  3057  		offset += arg.size
  3058  	}
  3059  	argN := ptrmap.n
  3060  	argSize = offset
  3061  	if runtime.GOARCH == "amd64p32" {
  3062  		offset += -offset & (8 - 1)
  3063  	}
  3064  	offset += -offset & (ptrSize - 1)
  3065  	retOffset = offset
  3066  	for _, res := range tt.out() {
  3067  		offset += -offset & uintptr(res.align-1)
  3068  		addTypeBits(ptrmap, offset, res)
  3069  		offset += res.size
  3070  	}
  3071  	offset += -offset & (ptrSize - 1)
  3072  
  3073  	// build dummy rtype holding gc program
  3074  	x := new(rtype)
  3075  	x.align = ptrSize
  3076  	if runtime.GOARCH == "amd64p32" {
  3077  		x.align = 8
  3078  	}
  3079  	x.size = offset
  3080  	x.ptrdata = uintptr(ptrmap.n) * ptrSize
  3081  	if ptrmap.n > 0 {
  3082  		x.gcdata = &ptrmap.data[0]
  3083  	} else {
  3084  		x.kind |= kindNoPointers
  3085  	}
  3086  	ptrmap.n = argN
  3087  
  3088  	var s string
  3089  	if rcvr != nil {
  3090  		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
  3091  	} else {
  3092  		s = "funcargs(" + t.String() + ")"
  3093  	}
  3094  	x.str = resolveReflectName(newName(s, "", "", false))
  3095  
  3096  	// cache result for future callers
  3097  	if layoutCache.m == nil {
  3098  		layoutCache.m = make(map[layoutKey]layoutType)
  3099  	}
  3100  	framePool = &sync.Pool{New: func() interface{} {
  3101  		return unsafe_New(x)
  3102  	}}
  3103  	layoutCache.m[k] = layoutType{
  3104  		t:         x,
  3105  		argSize:   argSize,
  3106  		retOffset: retOffset,
  3107  		stack:     ptrmap,
  3108  		framePool: framePool,
  3109  	}
  3110  	layoutCache.Unlock()
  3111  	return x, argSize, retOffset, ptrmap, framePool
  3112  }
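
        // For example, for t = func(*int) *int with no receiver on a 64-bit
        // system, the returned frame type has size 16 (one pointer word of
        // argument, one of result), argSize = 8, retOffset = 8, the frame's GC
        // bitmap marks both words as pointers, and the returned stack bitmap
        // covers only the argument word.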
  3113  
  3114  // ifaceIndir reports whether t is stored indirectly in an interface value.
  3115  func ifaceIndir(t *rtype) bool {
  3116  	return t.kind&kindDirectIface == 0
  3117  }
  3118  
  3119  // Layout matches runtime.BitVector (well enough).
  3120  type bitVector struct {
  3121  	n    uint32 // number of bits
  3122  	data []byte
  3123  }
  3124  
  3125  // append a bit to the bitmap.
  3126  func (bv *bitVector) append(bit uint8) {
  3127  	if bv.n%8 == 0 {
  3128  		bv.data = append(bv.data, 0)
  3129  	}
  3130  	bv.data[bv.n/8] |= bit << (bv.n % 8)
  3131  	bv.n++
  3132  }
  3133  
  3134  func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
  3135  	if t.kind&kindNoPointers != 0 {
  3136  		return
  3137  	}
  3138  
  3139  	switch Kind(t.kind & kindMask) {
  3140  	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
  3141  		// 1 pointer at start of representation
  3142  		for bv.n < uint32(offset/uintptr(ptrSize)) {
  3143  			bv.append(0)
  3144  		}
  3145  		bv.append(1)
  3146  
  3147  	case Interface:
  3148  		// 2 pointers
  3149  		for bv.n < uint32(offset/uintptr(ptrSize)) {
  3150  			bv.append(0)
  3151  		}
  3152  		bv.append(1)
  3153  		bv.append(1)
  3154  
  3155  	case Array:
  3156  		// repeat inner type
  3157  		tt := (*arrayType)(unsafe.Pointer(t))
  3158  		for i := 0; i < int(tt.len); i++ {
  3159  			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
  3160  		}
  3161  
  3162  	case Struct:
  3163  		// apply fields
  3164  		tt := (*structType)(unsafe.Pointer(t))
  3165  		for i := range tt.fields {
  3166  			f := &tt.fields[i]
  3167  			addTypeBits(bv, offset+f.offset, f.typ)
  3168  		}
  3169  	}
  3170  }
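
        // For example, for
        //
        //	struct {
        //		n uintptr
        //		p *byte
        //	}
        //
        // at offset 0, the uintptr field contributes nothing, and the *byte field
        // at offset 8 first pads the vector with a 0 for word 0 and then appends
        // a 1, leaving the bit vector [0 1].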