github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/reflect/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"internal/goarch"
	"internal/unsafeheader"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver,
	// and only exported methods are accessible.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Methods are sorted in lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods accessible using Method.
	//
	// Note that NumMethod counts unexported methods only for interface types.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
84 // If the type was predeclared (string, error) or not defined (*T, struct{}, 85 // []int, or A where A is an alias for a non-defined type), the package path 86 // will be the empty string. 87 PkgPath() string 88 89 // Size returns the number of bytes needed to store 90 // a value of the given type; it is analogous to unsafe.Sizeof. 91 Size() uintptr 92 93 // String returns a string representation of the type. 94 // The string representation may use shortened package names 95 // (e.g., base64 instead of "encoding/base64") and is not 96 // guaranteed to be unique among types. To test for type identity, 97 // compare the Types directly. 98 String() string 99 100 // Kind returns the specific kind of this type. 101 Kind() Kind 102 103 // Implements reports whether the type implements the interface type u. 104 Implements(u Type) bool 105 106 // AssignableTo reports whether a value of the type is assignable to type u. 107 AssignableTo(u Type) bool 108 109 // ConvertibleTo reports whether a value of the type is convertible to type u. 110 // Even if ConvertibleTo returns true, the conversion may still panic. 111 // For example, a slice of type []T is convertible to *[N]T, 112 // but the conversion will panic if its length is less than N. 113 ConvertibleTo(u Type) bool 114 115 // Comparable reports whether values of this type are comparable. 116 // Even if Comparable returns true, the comparison may still panic. 117 // For example, values of interface type are comparable, 118 // but the comparison will panic if their dynamic type is not comparable. 119 Comparable() bool 120 121 // Methods applicable only to some types, depending on Kind. 122 // The methods allowed for each kind are: 123 // 124 // Int*, Uint*, Float*, Complex*: Bits 125 // Array: Elem, Len 126 // Chan: ChanDir, Elem 127 // Func: In, NumIn, Out, NumOut, IsVariadic. 128 // Map: Key, Elem 129 // Pointer: Elem 130 // Slice: Elem 131 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 132 133 // Bits returns the size of the type in bits. 134 // It panics if the type's Kind is not one of the 135 // sized or unsized Int, Uint, Float, or Complex kinds. 136 Bits() int 137 138 // ChanDir returns a channel type's direction. 139 // It panics if the type's Kind is not Chan. 140 ChanDir() ChanDir 141 142 // IsVariadic reports whether a function type's final input parameter 143 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 144 // implicit actual type []T. 145 // 146 // For concreteness, if t represents func(x int, y ... float64), then 147 // 148 // t.NumIn() == 2 149 // t.In(0) is the reflect.Type for "int" 150 // t.In(1) is the reflect.Type for "[]float64" 151 // t.IsVariadic() == true 152 // 153 // IsVariadic panics if the type's Kind is not Func. 154 IsVariadic() bool 155 156 // Elem returns a type's element type. 157 // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice. 158 Elem() Type 159 160 // Field returns a struct type's i'th field. 161 // It panics if the type's Kind is not Struct. 162 // It panics if i is not in the range [0, NumField()). 163 Field(i int) StructField 164 165 // FieldByIndex returns the nested field corresponding 166 // to the index sequence. It is equivalent to calling Field 167 // successively for each index i. 168 // It panics if the type's Kind is not Struct. 169 FieldByIndex(index []int) StructField 170 171 // FieldByName returns the struct field with the given name 172 // and a boolean indicating if the field was found. 
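	//
	// A minimal illustrative sketch (the anonymous struct is arbitrary):
	//
	//	t := TypeOf(struct{ N int }{})
	//	f, ok := t.FieldByName("N") // ok == true, f.Type is the Type for int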
173 FieldByName(name string) (StructField, bool) 174 175 // FieldByNameFunc returns the struct field with a name 176 // that satisfies the match function and a boolean indicating if 177 // the field was found. 178 // 179 // FieldByNameFunc considers the fields in the struct itself 180 // and then the fields in any embedded structs, in breadth first order, 181 // stopping at the shallowest nesting depth containing one or more 182 // fields satisfying the match function. If multiple fields at that depth 183 // satisfy the match function, they cancel each other 184 // and FieldByNameFunc returns no match. 185 // This behavior mirrors Go's handling of name lookup in 186 // structs containing embedded fields. 187 FieldByNameFunc(match func(string) bool) (StructField, bool) 188 189 // In returns the type of a function type's i'th input parameter. 190 // It panics if the type's Kind is not Func. 191 // It panics if i is not in the range [0, NumIn()). 192 In(i int) Type 193 194 // Key returns a map type's key type. 195 // It panics if the type's Kind is not Map. 196 Key() Type 197 198 // Len returns an array type's length. 199 // It panics if the type's Kind is not Array. 200 Len() int 201 202 // NumField returns a struct type's field count. 203 // It panics if the type's Kind is not Struct. 204 NumField() int 205 206 // NumIn returns a function type's input parameter count. 207 // It panics if the type's Kind is not Func. 208 NumIn() int 209 210 // NumOut returns a function type's output parameter count. 211 // It panics if the type's Kind is not Func. 212 NumOut() int 213 214 // Out returns the type of a function type's i'th output parameter. 215 // It panics if the type's Kind is not Func. 216 // It panics if i is not in the range [0, NumOut()). 217 Out(i int) Type 218 219 common() *rtype 220 uncommon() *uncommonType 221 } 222 223 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 224 // if the names are equal, even if they are unexported names originating 225 // in different packages. The practical effect of this is that the result of 226 // t.FieldByName("x") is not well defined if the struct type t contains 227 // multiple fields named x (embedded from different packages). 228 // FieldByName may return one of the fields named x or may report that there are none. 229 // See https://golang.org/issue/4876 for more details. 230 231 /* 232 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go). 233 * A few are known to ../runtime/type.go to convey to debuggers. 234 * They are also known to ../runtime/type.go. 235 */ 236 237 // A Kind represents the specific kind of type that a Type represents. 238 // The zero Kind is not a valid kind. 239 type Kind uint 240 241 const ( 242 Invalid Kind = iota 243 Bool 244 Int 245 Int8 246 Int16 247 Int32 248 Int64 249 Uint 250 Uint8 251 Uint16 252 Uint32 253 Uint64 254 Uintptr 255 Float32 256 Float64 257 Complex64 258 Complex128 259 Array 260 Chan 261 Func 262 Interface 263 Map 264 Pointer 265 Slice 266 String 267 Struct 268 UnsafePointer 269 ) 270 271 // Ptr is the old name for the Pointer kind. 272 const Ptr = Pointer 273 274 // tflag is used by an rtype to signal what extra type information is 275 // available in the memory directly following the rtype value. 
276 // 277 // tflag values must be kept in sync with copies in: 278 // cmd/compile/internal/reflectdata/reflect.go 279 // cmd/link/internal/ld/decodesym.go 280 // runtime/type.go 281 type tflag uint8 282 283 const ( 284 // tflagUncommon means that there is a pointer, *uncommonType, 285 // just beyond the outer type structure. 286 // 287 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 288 // then t has uncommonType data and it can be accessed as: 289 // 290 // type tUncommon struct { 291 // structType 292 // u uncommonType 293 // } 294 // u := &(*tUncommon)(unsafe.Pointer(t)).u 295 tflagUncommon tflag = 1 << 0 296 297 // tflagExtraStar means the name in the str field has an 298 // extraneous '*' prefix. This is because for most types T in 299 // a program, the type *T also exists and reusing the str data 300 // saves binary size. 301 tflagExtraStar tflag = 1 << 1 302 303 // tflagNamed means the type has a name. 304 tflagNamed tflag = 1 << 2 305 306 // tflagRegularMemory means that equal and hash functions can treat 307 // this type as a single region of t.size bytes. 308 tflagRegularMemory tflag = 1 << 3 309 ) 310 311 // rtype is the common implementation of most values. 312 // It is embedded in other struct types. 313 // 314 // rtype must be kept in sync with ../runtime/type.go:/^type._type. 315 type rtype struct { 316 size uintptr 317 ptrdata uintptr // number of bytes in the type that can contain pointers 318 hash uint32 // hash of type; avoids computation in hash tables 319 tflag tflag // extra type information flags 320 align uint8 // alignment of variable with this type 321 fieldAlign uint8 // alignment of struct field with this type 322 kind uint8 // enumeration for C 323 // function for comparing objects of this type 324 // (ptr to object A, ptr to object B) -> ==? 325 equal func(unsafe.Pointer, unsafe.Pointer) bool 326 gcdata *byte // garbage collection data 327 str nameOff // string form 328 ptrToThis typeOff // type for pointer to this type, may be zero 329 } 330 331 // Method on non-interface type 332 type method struct { 333 name nameOff // name of method 334 mtyp typeOff // method type (without receiver) 335 ifn textOff // fn used in interface call (one-word receiver) 336 tfn textOff // fn used for normal method call 337 } 338 339 // uncommonType is present only for defined types or types with methods 340 // (if T is a defined type, the uncommonTypes for T and *T have methods). 341 // Using a pointer to this struct reduces the overall size required 342 // to describe a non-defined type with no methods. 343 type uncommonType struct { 344 pkgPath nameOff // import path; empty for built-in types like int, string 345 mcount uint16 // number of methods 346 xcount uint16 // number of exported methods 347 moff uint32 // offset from this uncommontype to [mcount]method 348 _ uint32 // unused 349 } 350 351 // ChanDir represents a channel type's direction. 352 type ChanDir int 353 354 const ( 355 RecvDir ChanDir = 1 << iota // <-chan 356 SendDir // chan<- 357 BothDir = RecvDir | SendDir // chan 358 ) 359 360 // arrayType represents a fixed array type. 361 type arrayType struct { 362 rtype 363 elem *rtype // array element type 364 slice *rtype // slice type 365 len uintptr 366 } 367 368 // chanType represents a channel type. 369 type chanType struct { 370 rtype 371 elem *rtype // channel element type 372 dir uintptr // channel direction (ChanDir) 373 } 374 375 // funcType represents a function type. 
376 // 377 // A *rtype for each in and out parameter is stored in an array that 378 // directly follows the funcType (and possibly its uncommonType). So 379 // a function type with one method, one input, and one output is: 380 // 381 // struct { 382 // funcType 383 // uncommonType 384 // [2]*rtype // [0] is in, [1] is out 385 // } 386 type funcType struct { 387 rtype 388 inCount uint16 389 outCount uint16 // top bit is set if last input parameter is ... 390 } 391 392 // imethod represents a method on an interface type 393 type imethod struct { 394 name nameOff // name of method 395 typ typeOff // .(*FuncType) underneath 396 } 397 398 // interfaceType represents an interface type. 399 type interfaceType struct { 400 rtype 401 pkgPath name // import path 402 methods []imethod // sorted by hash 403 } 404 405 // mapType represents a map type. 406 type mapType struct { 407 rtype 408 key *rtype // map key type 409 elem *rtype // map element (value) type 410 bucket *rtype // internal bucket structure 411 // function for hashing keys (ptr to key, seed) -> hash 412 hasher func(unsafe.Pointer, uintptr) uintptr 413 keysize uint8 // size of key slot 414 valuesize uint8 // size of value slot 415 bucketsize uint16 // size of bucket 416 flags uint32 417 } 418 419 // ptrType represents a pointer type. 420 type ptrType struct { 421 rtype 422 elem *rtype // pointer element (pointed at) type 423 } 424 425 // sliceType represents a slice type. 426 type sliceType struct { 427 rtype 428 elem *rtype // slice element type 429 } 430 431 // Struct field 432 type structField struct { 433 name name // name is always non-empty 434 typ *rtype // type of field 435 offsetEmbed uintptr // byte offset of field<<1 | isEmbedded 436 } 437 438 func (f *structField) offset() uintptr { 439 return f.offsetEmbed >> 1 440 } 441 442 func (f *structField) embedded() bool { 443 return f.offsetEmbed&1 != 0 444 } 445 446 // structType represents a struct type. 447 type structType struct { 448 rtype 449 pkgPath name 450 fields []structField // sorted by offset 451 } 452 453 // name is an encoded type name with optional extra data. 454 // 455 // The first byte is a bit field containing: 456 // 457 // 1<<0 the name is exported 458 // 1<<1 tag data follows the name 459 // 1<<2 pkgPath nameOff follows the name and tag 460 // 461 // Following that, there is a varint-encoded length of the name, 462 // followed by the name itself. 463 // 464 // If tag data is present, it also has a varint-encoded length 465 // followed by the tag itself. 466 // 467 // If the import path follows, then 4 bytes at the end of 468 // the data form a nameOff. The import path is only set for concrete 469 // methods that are defined in a different package than their type. 470 // 471 // If a name starts with "*", then the exported bit represents 472 // whether the pointed to type is exported. 473 // 474 // Note: this encoding must match here and in: 475 // cmd/compile/internal/reflectdata/reflect.go 476 // runtime/type.go 477 // internal/reflectlite/type.go 478 // cmd/link/internal/ld/decodesym.go 479 480 type name struct { 481 bytes *byte 482 } 483 484 func (n name) data(off int, whySafe string) *byte { 485 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe)) 486 } 487 488 func (n name) isExported() bool { 489 return (*n.bytes)&(1<<0) != 0 490 } 491 492 func (n name) hasTag() bool { 493 return (*n.bytes)&(1<<1) != 0 494 } 495 496 // readVarint parses a varint as encoded by encoding/binary. 
497 // It returns the number of encoded bytes and the encoded value. 498 func (n name) readVarint(off int) (int, int) { 499 v := 0 500 for i := 0; ; i++ { 501 x := *n.data(off+i, "read varint") 502 v += int(x&0x7f) << (7 * i) 503 if x&0x80 == 0 { 504 return i + 1, v 505 } 506 } 507 } 508 509 // writeVarint writes n to buf in varint form. Returns the 510 // number of bytes written. n must be nonnegative. 511 // Writes at most 10 bytes. 512 func writeVarint(buf []byte, n int) int { 513 for i := 0; ; i++ { 514 b := byte(n & 0x7f) 515 n >>= 7 516 if n == 0 { 517 buf[i] = b 518 return i + 1 519 } 520 buf[i] = b | 0x80 521 } 522 } 523 524 func (n name) name() (s string) { 525 if n.bytes == nil { 526 return 527 } 528 i, l := n.readVarint(1) 529 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 530 hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string")) 531 hdr.Len = l 532 return 533 } 534 535 func (n name) tag() (s string) { 536 if !n.hasTag() { 537 return "" 538 } 539 i, l := n.readVarint(1) 540 i2, l2 := n.readVarint(1 + i + l) 541 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 542 hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string")) 543 hdr.Len = l2 544 return 545 } 546 547 func (n name) pkgPath() string { 548 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 { 549 return "" 550 } 551 i, l := n.readVarint(1) 552 off := 1 + i + l 553 if n.hasTag() { 554 i2, l2 := n.readVarint(off) 555 off += i2 + l2 556 } 557 var nameOff int32 558 // Note that this field may not be aligned in memory, 559 // so we cannot use a direct int32 assignment here. 560 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:]) 561 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 562 return pkgPathName.name() 563 } 564 565 func newName(n, tag string, exported bool) name { 566 if len(n) >= 1<<29 { 567 panic("reflect.nameFrom: name too long: " + n[:1024] + "...") 568 } 569 if len(tag) >= 1<<29 { 570 panic("reflect.nameFrom: tag too long: " + tag[:1024] + "...") 571 } 572 var nameLen [10]byte 573 var tagLen [10]byte 574 nameLenLen := writeVarint(nameLen[:], len(n)) 575 tagLenLen := writeVarint(tagLen[:], len(tag)) 576 577 var bits byte 578 l := 1 + nameLenLen + len(n) 579 if exported { 580 bits |= 1 << 0 581 } 582 if len(tag) > 0 { 583 l += tagLenLen + len(tag) 584 bits |= 1 << 1 585 } 586 587 b := make([]byte, l) 588 b[0] = bits 589 copy(b[1:], nameLen[:nameLenLen]) 590 copy(b[1+nameLenLen:], n) 591 if len(tag) > 0 { 592 tb := b[1+nameLenLen+len(n):] 593 copy(tb, tagLen[:tagLenLen]) 594 copy(tb[tagLenLen:], tag) 595 } 596 597 return name{bytes: &b[0]} 598 } 599 600 /* 601 * The compiler knows the exact layout of all the data structures above. 602 * The compiler does not know about the data structures and methods below. 603 */ 604 605 // Method represents a single method. 606 type Method struct { 607 // Name is the method name. 608 Name string 609 610 // PkgPath is the package path that qualifies a lower case (unexported) 611 // method name. It is empty for upper case (exported) method names. 612 // The combination of PkgPath and Name uniquely identifies a method 613 // in a method set. 614 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 615 PkgPath string 616 617 Type Type // method type 618 Func Value // func with receiver as first argument 619 Index int // index for Type.Method 620 } 621 622 // IsExported reports whether the method is exported. 
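// For example (illustrative only; time.Time is just a convenient type
// with an exported method):
//
//	m, _ := TypeOf(time.Time{}).MethodByName("Unix")
//	_ = m.IsExported() // true, and m.PkgPath is empty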
623 func (m Method) IsExported() bool { 624 return m.PkgPath == "" 625 } 626 627 const ( 628 kindDirectIface = 1 << 5 629 kindGCProg = 1 << 6 // Type.gc points to GC program 630 kindMask = (1 << 5) - 1 631 ) 632 633 // String returns the name of k. 634 func (k Kind) String() string { 635 if int(k) < len(kindNames) { 636 return kindNames[k] 637 } 638 return "kind" + strconv.Itoa(int(k)) 639 } 640 641 var kindNames = []string{ 642 Invalid: "invalid", 643 Bool: "bool", 644 Int: "int", 645 Int8: "int8", 646 Int16: "int16", 647 Int32: "int32", 648 Int64: "int64", 649 Uint: "uint", 650 Uint8: "uint8", 651 Uint16: "uint16", 652 Uint32: "uint32", 653 Uint64: "uint64", 654 Uintptr: "uintptr", 655 Float32: "float32", 656 Float64: "float64", 657 Complex64: "complex64", 658 Complex128: "complex128", 659 Array: "array", 660 Chan: "chan", 661 Func: "func", 662 Interface: "interface", 663 Map: "map", 664 Pointer: "ptr", 665 Slice: "slice", 666 String: "string", 667 Struct: "struct", 668 UnsafePointer: "unsafe.Pointer", 669 } 670 671 func (t *uncommonType) methods() []method { 672 if t.mcount == 0 { 673 return nil 674 } 675 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount] 676 } 677 678 func (t *uncommonType) exportedMethods() []method { 679 if t.xcount == 0 { 680 return nil 681 } 682 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount] 683 } 684 685 // resolveNameOff resolves a name offset from a base pointer. 686 // The (*rtype).nameOff method is a convenience wrapper for this function. 687 // Implemented in the runtime package. 688 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 689 690 // resolveTypeOff resolves an *rtype offset from a base type. 691 // The (*rtype).typeOff method is a convenience wrapper for this function. 692 // Implemented in the runtime package. 693 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 694 695 // resolveTextOff resolves a function pointer offset from a base type. 696 // The (*rtype).textOff method is a convenience wrapper for this function. 697 // Implemented in the runtime package. 698 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 699 700 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 701 // It returns a new ID that can be used as a typeOff or textOff, and will 702 // be resolved correctly. Implemented in the runtime package. 703 func addReflectOff(ptr unsafe.Pointer) int32 704 705 // resolveReflectName adds a name to the reflection lookup map in the runtime. 706 // It returns a new nameOff that can be used to refer to the pointer. 707 func resolveReflectName(n name) nameOff { 708 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 709 } 710 711 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 712 // It returns a new typeOff that can be used to refer to the pointer. 713 func resolveReflectType(t *rtype) typeOff { 714 return typeOff(addReflectOff(unsafe.Pointer(t))) 715 } 716 717 // resolveReflectText adds a function pointer to the reflection lookup map in 718 // the runtime. It returns a new textOff that can be used to refer to the 719 // pointer. 
720 func resolveReflectText(ptr unsafe.Pointer) textOff { 721 return textOff(addReflectOff(ptr)) 722 } 723 724 type nameOff int32 // offset to a name 725 type typeOff int32 // offset to an *rtype 726 type textOff int32 // offset from top of text section 727 728 func (t *rtype) nameOff(off nameOff) name { 729 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 730 } 731 732 func (t *rtype) typeOff(off typeOff) *rtype { 733 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 734 } 735 736 func (t *rtype) textOff(off textOff) unsafe.Pointer { 737 return resolveTextOff(unsafe.Pointer(t), int32(off)) 738 } 739 740 func (t *rtype) uncommon() *uncommonType { 741 if t.tflag&tflagUncommon == 0 { 742 return nil 743 } 744 switch t.Kind() { 745 case Struct: 746 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 747 case Pointer: 748 type u struct { 749 ptrType 750 u uncommonType 751 } 752 return &(*u)(unsafe.Pointer(t)).u 753 case Func: 754 type u struct { 755 funcType 756 u uncommonType 757 } 758 return &(*u)(unsafe.Pointer(t)).u 759 case Slice: 760 type u struct { 761 sliceType 762 u uncommonType 763 } 764 return &(*u)(unsafe.Pointer(t)).u 765 case Array: 766 type u struct { 767 arrayType 768 u uncommonType 769 } 770 return &(*u)(unsafe.Pointer(t)).u 771 case Chan: 772 type u struct { 773 chanType 774 u uncommonType 775 } 776 return &(*u)(unsafe.Pointer(t)).u 777 case Map: 778 type u struct { 779 mapType 780 u uncommonType 781 } 782 return &(*u)(unsafe.Pointer(t)).u 783 case Interface: 784 type u struct { 785 interfaceType 786 u uncommonType 787 } 788 return &(*u)(unsafe.Pointer(t)).u 789 default: 790 type u struct { 791 rtype 792 u uncommonType 793 } 794 return &(*u)(unsafe.Pointer(t)).u 795 } 796 } 797 798 func (t *rtype) String() string { 799 s := t.nameOff(t.str).name() 800 if t.tflag&tflagExtraStar != 0 { 801 return s[1:] 802 } 803 return s 804 } 805 806 func (t *rtype) Size() uintptr { return t.size } 807 808 func (t *rtype) Bits() int { 809 if t == nil { 810 panic("reflect: Bits of nil Type") 811 } 812 k := t.Kind() 813 if k < Int || k > Complex128 { 814 panic("reflect: Bits of non-arithmetic Type " + t.String()) 815 } 816 return int(t.size) * 8 817 } 818 819 func (t *rtype) Align() int { return int(t.align) } 820 821 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 822 823 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 824 825 func (t *rtype) pointers() bool { return t.ptrdata != 0 } 826 827 func (t *rtype) common() *rtype { return t } 828 829 func (t *rtype) exportedMethods() []method { 830 ut := t.uncommon() 831 if ut == nil { 832 return nil 833 } 834 return ut.exportedMethods() 835 } 836 837 func (t *rtype) NumMethod() int { 838 if t.Kind() == Interface { 839 tt := (*interfaceType)(unsafe.Pointer(t)) 840 return tt.NumMethod() 841 } 842 return len(t.exportedMethods()) 843 } 844 845 func (t *rtype) Method(i int) (m Method) { 846 if t.Kind() == Interface { 847 tt := (*interfaceType)(unsafe.Pointer(t)) 848 return tt.Method(i) 849 } 850 methods := t.exportedMethods() 851 if i < 0 || i >= len(methods) { 852 panic("reflect: Method index out of range") 853 } 854 p := methods[i] 855 pname := t.nameOff(p.name) 856 m.Name = pname.name() 857 fl := flag(Func) 858 mtyp := t.typeOff(p.mtyp) 859 ft := (*funcType)(unsafe.Pointer(mtyp)) 860 in := make([]Type, 0, 1+len(ft.in())) 861 in = append(in, t) 862 for _, arg := range ft.in() { 863 in = append(in, arg) 864 } 865 out := make([]Type, 0, len(ft.out())) 866 for _, ret := range ft.out() { 867 out = 
append(out, ret) 868 } 869 mt := FuncOf(in, out, ft.IsVariadic()) 870 m.Type = mt 871 tfn := t.textOff(p.tfn) 872 fn := unsafe.Pointer(&tfn) 873 m.Func = Value{mt.(*rtype), fn, fl} 874 875 m.Index = i 876 return m 877 } 878 879 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 880 if t.Kind() == Interface { 881 tt := (*interfaceType)(unsafe.Pointer(t)) 882 return tt.MethodByName(name) 883 } 884 ut := t.uncommon() 885 if ut == nil { 886 return Method{}, false 887 } 888 // TODO(mdempsky): Binary search. 889 for i, p := range ut.exportedMethods() { 890 if t.nameOff(p.name).name() == name { 891 return t.Method(i), true 892 } 893 } 894 return Method{}, false 895 } 896 897 func (t *rtype) PkgPath() string { 898 if t.tflag&tflagNamed == 0 { 899 return "" 900 } 901 ut := t.uncommon() 902 if ut == nil { 903 return "" 904 } 905 return t.nameOff(ut.pkgPath).name() 906 } 907 908 func (t *rtype) hasName() bool { 909 return t.tflag&tflagNamed != 0 910 } 911 912 func (t *rtype) Name() string { 913 if !t.hasName() { 914 return "" 915 } 916 s := t.String() 917 i := len(s) - 1 918 sqBrackets := 0 919 for i >= 0 && (s[i] != '.' || sqBrackets != 0) { 920 switch s[i] { 921 case ']': 922 sqBrackets++ 923 case '[': 924 sqBrackets-- 925 } 926 i-- 927 } 928 return s[i+1:] 929 } 930 931 func (t *rtype) ChanDir() ChanDir { 932 if t.Kind() != Chan { 933 panic("reflect: ChanDir of non-chan type " + t.String()) 934 } 935 tt := (*chanType)(unsafe.Pointer(t)) 936 return ChanDir(tt.dir) 937 } 938 939 func (t *rtype) IsVariadic() bool { 940 if t.Kind() != Func { 941 panic("reflect: IsVariadic of non-func type " + t.String()) 942 } 943 tt := (*funcType)(unsafe.Pointer(t)) 944 return tt.outCount&(1<<15) != 0 945 } 946 947 func (t *rtype) Elem() Type { 948 switch t.Kind() { 949 case Array: 950 tt := (*arrayType)(unsafe.Pointer(t)) 951 return toType(tt.elem) 952 case Chan: 953 tt := (*chanType)(unsafe.Pointer(t)) 954 return toType(tt.elem) 955 case Map: 956 tt := (*mapType)(unsafe.Pointer(t)) 957 return toType(tt.elem) 958 case Pointer: 959 tt := (*ptrType)(unsafe.Pointer(t)) 960 return toType(tt.elem) 961 case Slice: 962 tt := (*sliceType)(unsafe.Pointer(t)) 963 return toType(tt.elem) 964 } 965 panic("reflect: Elem of invalid type " + t.String()) 966 } 967 968 func (t *rtype) Field(i int) StructField { 969 if t.Kind() != Struct { 970 panic("reflect: Field of non-struct type " + t.String()) 971 } 972 tt := (*structType)(unsafe.Pointer(t)) 973 return tt.Field(i) 974 } 975 976 func (t *rtype) FieldByIndex(index []int) StructField { 977 if t.Kind() != Struct { 978 panic("reflect: FieldByIndex of non-struct type " + t.String()) 979 } 980 tt := (*structType)(unsafe.Pointer(t)) 981 return tt.FieldByIndex(index) 982 } 983 984 func (t *rtype) FieldByName(name string) (StructField, bool) { 985 if t.Kind() != Struct { 986 panic("reflect: FieldByName of non-struct type " + t.String()) 987 } 988 tt := (*structType)(unsafe.Pointer(t)) 989 return tt.FieldByName(name) 990 } 991 992 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 993 if t.Kind() != Struct { 994 panic("reflect: FieldByNameFunc of non-struct type " + t.String()) 995 } 996 tt := (*structType)(unsafe.Pointer(t)) 997 return tt.FieldByNameFunc(match) 998 } 999 1000 func (t *rtype) In(i int) Type { 1001 if t.Kind() != Func { 1002 panic("reflect: In of non-func type " + t.String()) 1003 } 1004 tt := (*funcType)(unsafe.Pointer(t)) 1005 return toType(tt.in()[i]) 1006 } 1007 1008 func (t *rtype) Key() Type { 1009 if t.Kind() != Map { 1010 
panic("reflect: Key of non-map type " + t.String()) 1011 } 1012 tt := (*mapType)(unsafe.Pointer(t)) 1013 return toType(tt.key) 1014 } 1015 1016 func (t *rtype) Len() int { 1017 if t.Kind() != Array { 1018 panic("reflect: Len of non-array type " + t.String()) 1019 } 1020 tt := (*arrayType)(unsafe.Pointer(t)) 1021 return int(tt.len) 1022 } 1023 1024 func (t *rtype) NumField() int { 1025 if t.Kind() != Struct { 1026 panic("reflect: NumField of non-struct type " + t.String()) 1027 } 1028 tt := (*structType)(unsafe.Pointer(t)) 1029 return len(tt.fields) 1030 } 1031 1032 func (t *rtype) NumIn() int { 1033 if t.Kind() != Func { 1034 panic("reflect: NumIn of non-func type " + t.String()) 1035 } 1036 tt := (*funcType)(unsafe.Pointer(t)) 1037 return int(tt.inCount) 1038 } 1039 1040 func (t *rtype) NumOut() int { 1041 if t.Kind() != Func { 1042 panic("reflect: NumOut of non-func type " + t.String()) 1043 } 1044 tt := (*funcType)(unsafe.Pointer(t)) 1045 return len(tt.out()) 1046 } 1047 1048 func (t *rtype) Out(i int) Type { 1049 if t.Kind() != Func { 1050 panic("reflect: Out of non-func type " + t.String()) 1051 } 1052 tt := (*funcType)(unsafe.Pointer(t)) 1053 return toType(tt.out()[i]) 1054 } 1055 1056 func (t *funcType) in() []*rtype { 1057 uadd := unsafe.Sizeof(*t) 1058 if t.tflag&tflagUncommon != 0 { 1059 uadd += unsafe.Sizeof(uncommonType{}) 1060 } 1061 if t.inCount == 0 { 1062 return nil 1063 } 1064 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount] 1065 } 1066 1067 func (t *funcType) out() []*rtype { 1068 uadd := unsafe.Sizeof(*t) 1069 if t.tflag&tflagUncommon != 0 { 1070 uadd += unsafe.Sizeof(uncommonType{}) 1071 } 1072 outCount := t.outCount & (1<<15 - 1) 1073 if outCount == 0 { 1074 return nil 1075 } 1076 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount] 1077 } 1078 1079 // add returns p+x. 1080 // 1081 // The whySafe string is ignored, so that the function still inlines 1082 // as efficiently as p+x, but all call sites should use the string to 1083 // record why the addition is safe, which is to say why the addition 1084 // does not cause x to advance to the very end of p's allocation 1085 // and therefore point incorrectly at the next block in memory. 1086 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { 1087 return unsafe.Pointer(uintptr(p) + x) 1088 } 1089 1090 func (d ChanDir) String() string { 1091 switch d { 1092 case SendDir: 1093 return "chan<-" 1094 case RecvDir: 1095 return "<-chan" 1096 case BothDir: 1097 return "chan" 1098 } 1099 return "ChanDir" + strconv.Itoa(int(d)) 1100 } 1101 1102 // Method returns the i'th method in the type's method set. 1103 func (t *interfaceType) Method(i int) (m Method) { 1104 if i < 0 || i >= len(t.methods) { 1105 return 1106 } 1107 p := &t.methods[i] 1108 pname := t.nameOff(p.name) 1109 m.Name = pname.name() 1110 if !pname.isExported() { 1111 m.PkgPath = pname.pkgPath() 1112 if m.PkgPath == "" { 1113 m.PkgPath = t.pkgPath.name() 1114 } 1115 } 1116 m.Type = toType(t.typeOff(p.typ)) 1117 m.Index = i 1118 return 1119 } 1120 1121 // NumMethod returns the number of interface methods in the type's method set. 1122 func (t *interfaceType) NumMethod() int { return len(t.methods) } 1123 1124 // MethodByName method with the given name in the type's method set. 
1125 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { 1126 if t == nil { 1127 return 1128 } 1129 var p *imethod 1130 for i := range t.methods { 1131 p = &t.methods[i] 1132 if t.nameOff(p.name).name() == name { 1133 return t.Method(i), true 1134 } 1135 } 1136 return 1137 } 1138 1139 // A StructField describes a single field in a struct. 1140 type StructField struct { 1141 // Name is the field name. 1142 Name string 1143 1144 // PkgPath is the package path that qualifies a lower case (unexported) 1145 // field name. It is empty for upper case (exported) field names. 1146 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 1147 PkgPath string 1148 1149 Type Type // field type 1150 Tag StructTag // field tag string 1151 Offset uintptr // offset within struct, in bytes 1152 Index []int // index sequence for Type.FieldByIndex 1153 Anonymous bool // is an embedded field 1154 } 1155 1156 // IsExported reports whether the field is exported. 1157 func (f StructField) IsExported() bool { 1158 return f.PkgPath == "" 1159 } 1160 1161 // A StructTag is the tag string in a struct field. 1162 // 1163 // By convention, tag strings are a concatenation of 1164 // optionally space-separated key:"value" pairs. 1165 // Each key is a non-empty string consisting of non-control 1166 // characters other than space (U+0020 ' '), quote (U+0022 '"'), 1167 // and colon (U+003A ':'). Each value is quoted using U+0022 '"' 1168 // characters and Go string literal syntax. 1169 type StructTag string 1170 1171 // Get returns the value associated with key in the tag string. 1172 // If there is no such key in the tag, Get returns the empty string. 1173 // If the tag does not have the conventional format, the value 1174 // returned by Get is unspecified. To determine whether a tag is 1175 // explicitly set to the empty string, use Lookup. 1176 func (tag StructTag) Get(key string) string { 1177 v, _ := tag.Lookup(key) 1178 return v 1179 } 1180 1181 // Lookup returns the value associated with key in the tag string. 1182 // If the key is present in the tag the value (which may be empty) 1183 // is returned. Otherwise the returned value will be the empty string. 1184 // The ok return value reports whether the value was explicitly set in 1185 // the tag string. If the tag does not have the conventional format, 1186 // the value returned by Lookup is unspecified. 1187 func (tag StructTag) Lookup(key string) (value string, ok bool) { 1188 // When modifying this code, also update the validateStructTag code 1189 // in cmd/vet/structtag.go. 1190 1191 for tag != "" { 1192 // Skip leading space. 1193 i := 0 1194 for i < len(tag) && tag[i] == ' ' { 1195 i++ 1196 } 1197 tag = tag[i:] 1198 if tag == "" { 1199 break 1200 } 1201 1202 // Scan to colon. A space, a quote or a control character is a syntax error. 1203 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just 1204 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters 1205 // as it is simpler to inspect the tag's bytes than the tag's runes. 1206 i = 0 1207 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { 1208 i++ 1209 } 1210 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { 1211 break 1212 } 1213 name := string(tag[:i]) 1214 tag = tag[i+1:] 1215 1216 // Scan quoted string to find value. 
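		// At this point name holds the key (e.g. "json") and tag begins with
		// the opening quote of the value: for a field tag such as
		// `json:"name,omitempty"`, the loop below captures the quoted
		// "name,omitempty" (including both quotes, honoring backslash
		// escapes); if name matches the requested key, the value is
		// unquoted with strconv.Unquote and returned.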
1217 i = 1 1218 for i < len(tag) && tag[i] != '"' { 1219 if tag[i] == '\\' { 1220 i++ 1221 } 1222 i++ 1223 } 1224 if i >= len(tag) { 1225 break 1226 } 1227 qvalue := string(tag[:i+1]) 1228 tag = tag[i+1:] 1229 1230 if key == name { 1231 value, err := strconv.Unquote(qvalue) 1232 if err != nil { 1233 break 1234 } 1235 return value, true 1236 } 1237 } 1238 return "", false 1239 } 1240 1241 // Field returns the i'th struct field. 1242 func (t *structType) Field(i int) (f StructField) { 1243 if i < 0 || i >= len(t.fields) { 1244 panic("reflect: Field index out of bounds") 1245 } 1246 p := &t.fields[i] 1247 f.Type = toType(p.typ) 1248 f.Name = p.name.name() 1249 f.Anonymous = p.embedded() 1250 if !p.name.isExported() { 1251 f.PkgPath = t.pkgPath.name() 1252 } 1253 if tag := p.name.tag(); tag != "" { 1254 f.Tag = StructTag(tag) 1255 } 1256 f.Offset = p.offset() 1257 1258 // NOTE(rsc): This is the only allocation in the interface 1259 // presented by a reflect.Type. It would be nice to avoid, 1260 // at least in the common cases, but we need to make sure 1261 // that misbehaving clients of reflect cannot affect other 1262 // uses of reflect. One possibility is CL 5371098, but we 1263 // postponed that ugliness until there is a demonstrated 1264 // need for the performance. This is issue 2320. 1265 f.Index = []int{i} 1266 return 1267 } 1268 1269 // TODO(gri): Should there be an error/bool indicator if the index 1270 // is wrong for FieldByIndex? 1271 1272 // FieldByIndex returns the nested field corresponding to index. 1273 func (t *structType) FieldByIndex(index []int) (f StructField) { 1274 f.Type = toType(&t.rtype) 1275 for i, x := range index { 1276 if i > 0 { 1277 ft := f.Type 1278 if ft.Kind() == Pointer && ft.Elem().Kind() == Struct { 1279 ft = ft.Elem() 1280 } 1281 f.Type = ft 1282 } 1283 f = f.Type.Field(x) 1284 } 1285 return 1286 } 1287 1288 // A fieldScan represents an item on the fieldByNameFunc scan work list. 1289 type fieldScan struct { 1290 typ *structType 1291 index []int 1292 } 1293 1294 // FieldByNameFunc returns the struct field with a name that satisfies the 1295 // match function and a boolean to indicate if the field was found. 1296 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) { 1297 // This uses the same condition that the Go language does: there must be a unique instance 1298 // of the match at a given depth level. If there are multiple instances of a match at the 1299 // same depth, they annihilate each other and inhibit any possible match at a lower level. 1300 // The algorithm is breadth first search, one depth level at a time. 1301 1302 // The current and next slices are work queues: 1303 // current lists the fields to visit on this depth level, 1304 // and next lists the fields on the next lower level. 1305 current := []fieldScan{} 1306 next := []fieldScan{{typ: t}} 1307 1308 // nextCount records the number of times an embedded type has been 1309 // encountered and considered for queueing in the 'next' slice. 1310 // We only queue the first one, but we increment the count on each. 1311 // If a struct type T can be reached more than once at a given depth level, 1312 // then it annihilates itself and need not be considered at all when we 1313 // process that next depth level. 1314 var nextCount map[*structType]int 1315 1316 // visited records the structs that have been considered already. 
1317 // Embedded pointer fields can create cycles in the graph of 1318 // reachable embedded types; visited avoids following those cycles. 1319 // It also avoids duplicated effort: if we didn't find the field in an 1320 // embedded type T at level 2, we won't find it in one at level 4 either. 1321 visited := map[*structType]bool{} 1322 1323 for len(next) > 0 { 1324 current, next = next, current[:0] 1325 count := nextCount 1326 nextCount = nil 1327 1328 // Process all the fields at this depth, now listed in 'current'. 1329 // The loop queues embedded fields found in 'next', for processing during the next 1330 // iteration. The multiplicity of the 'current' field counts is recorded 1331 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. 1332 for _, scan := range current { 1333 t := scan.typ 1334 if visited[t] { 1335 // We've looked through this type before, at a higher level. 1336 // That higher level would shadow the lower level we're now at, 1337 // so this one can't be useful to us. Ignore it. 1338 continue 1339 } 1340 visited[t] = true 1341 for i := range t.fields { 1342 f := &t.fields[i] 1343 // Find name and (for embedded field) type for field f. 1344 fname := f.name.name() 1345 var ntyp *rtype 1346 if f.embedded() { 1347 // Embedded field of type T or *T. 1348 ntyp = f.typ 1349 if ntyp.Kind() == Pointer { 1350 ntyp = ntyp.Elem().common() 1351 } 1352 } 1353 1354 // Does it match? 1355 if match(fname) { 1356 // Potential match 1357 if count[t] > 1 || ok { 1358 // Name appeared multiple times at this level: annihilate. 1359 return StructField{}, false 1360 } 1361 result = t.Field(i) 1362 result.Index = nil 1363 result.Index = append(result.Index, scan.index...) 1364 result.Index = append(result.Index, i) 1365 ok = true 1366 continue 1367 } 1368 1369 // Queue embedded struct fields for processing with next level, 1370 // but only if we haven't seen a match yet at this level and only 1371 // if the embedded types haven't already been queued. 1372 if ok || ntyp == nil || ntyp.Kind() != Struct { 1373 continue 1374 } 1375 styp := (*structType)(unsafe.Pointer(ntyp)) 1376 if nextCount[styp] > 0 { 1377 nextCount[styp] = 2 // exact multiple doesn't matter 1378 continue 1379 } 1380 if nextCount == nil { 1381 nextCount = map[*structType]int{} 1382 } 1383 nextCount[styp] = 1 1384 if count[t] > 1 { 1385 nextCount[styp] = 2 // exact multiple doesn't matter 1386 } 1387 var index []int 1388 index = append(index, scan.index...) 1389 index = append(index, i) 1390 next = append(next, fieldScan{styp, index}) 1391 } 1392 } 1393 if ok { 1394 break 1395 } 1396 } 1397 return 1398 } 1399 1400 // FieldByName returns the struct field with the given name 1401 // and a boolean to indicate if the field was found. 1402 func (t *structType) FieldByName(name string) (f StructField, present bool) { 1403 // Quick check for top-level name, or struct without embedded fields. 1404 hasEmbeds := false 1405 if name != "" { 1406 for i := range t.fields { 1407 tf := &t.fields[i] 1408 if tf.name.name() == name { 1409 return t.Field(i), true 1410 } 1411 if tf.embedded() { 1412 hasEmbeds = true 1413 } 1414 } 1415 } 1416 if !hasEmbeds { 1417 return 1418 } 1419 return t.FieldByNameFunc(func(s string) bool { return s == name }) 1420 } 1421 1422 // TypeOf returns the reflection Type that represents the dynamic type of i. 1423 // If i is a nil interface value, TypeOf returns nil. 
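// For example (a minimal illustrative sketch):
//
//	var w io.Writer = os.Stdout
//	t := TypeOf(w)  // the dynamic type *os.File, not io.Writer
//	_ = t.Kind()    // Pointer
//	_ = TypeOf(nil) // nil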
1424 func TypeOf(i any) Type { 1425 eface := *(*emptyInterface)(unsafe.Pointer(&i)) 1426 return toType(eface.typ) 1427 } 1428 1429 // ptrMap is the cache for PointerTo. 1430 var ptrMap sync.Map // map[*rtype]*ptrType 1431 1432 // PtrTo returns the pointer type with element t. 1433 // For example, if t represents type Foo, PtrTo(t) represents *Foo. 1434 // 1435 // PtrTo is the old spelling of PointerTo. 1436 // The two functions behave identically. 1437 func PtrTo(t Type) Type { return PointerTo(t) } 1438 1439 // PointerTo returns the pointer type with element t. 1440 // For example, if t represents type Foo, PointerTo(t) represents *Foo. 1441 func PointerTo(t Type) Type { 1442 return t.(*rtype).ptrTo() 1443 } 1444 1445 func (t *rtype) ptrTo() *rtype { 1446 if t.ptrToThis != 0 { 1447 return t.typeOff(t.ptrToThis) 1448 } 1449 1450 // Check the cache. 1451 if pi, ok := ptrMap.Load(t); ok { 1452 return &pi.(*ptrType).rtype 1453 } 1454 1455 // Look in known types. 1456 s := "*" + t.String() 1457 for _, tt := range typesByString(s) { 1458 p := (*ptrType)(unsafe.Pointer(tt)) 1459 if p.elem != t { 1460 continue 1461 } 1462 pi, _ := ptrMap.LoadOrStore(t, p) 1463 return &pi.(*ptrType).rtype 1464 } 1465 1466 // Create a new ptrType starting with the description 1467 // of an *unsafe.Pointer. 1468 var iptr any = (*unsafe.Pointer)(nil) 1469 prototype := *(**ptrType)(unsafe.Pointer(&iptr)) 1470 pp := *prototype 1471 1472 pp.str = resolveReflectName(newName(s, "", false)) 1473 pp.ptrToThis = 0 1474 1475 // For the type structures linked into the binary, the 1476 // compiler provides a good hash of the string. 1477 // Create a good hash for the new string by using 1478 // the FNV-1 hash's mixing function to combine the 1479 // old hash and the new "*". 1480 pp.hash = fnv1(t.hash, '*') 1481 1482 pp.elem = t 1483 1484 pi, _ := ptrMap.LoadOrStore(t, &pp) 1485 return &pi.(*ptrType).rtype 1486 } 1487 1488 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. 1489 func fnv1(x uint32, list ...byte) uint32 { 1490 for _, b := range list { 1491 x = x*16777619 ^ uint32(b) 1492 } 1493 return x 1494 } 1495 1496 func (t *rtype) Implements(u Type) bool { 1497 if u == nil { 1498 panic("reflect: nil type passed to Type.Implements") 1499 } 1500 if u.Kind() != Interface { 1501 panic("reflect: non-interface type passed to Type.Implements") 1502 } 1503 return implements(u.(*rtype), t) 1504 } 1505 1506 func (t *rtype) AssignableTo(u Type) bool { 1507 if u == nil { 1508 panic("reflect: nil type passed to Type.AssignableTo") 1509 } 1510 uu := u.(*rtype) 1511 return directlyAssignable(uu, t) || implements(uu, t) 1512 } 1513 1514 func (t *rtype) ConvertibleTo(u Type) bool { 1515 if u == nil { 1516 panic("reflect: nil type passed to Type.ConvertibleTo") 1517 } 1518 uu := u.(*rtype) 1519 return convertOp(uu, t) != nil 1520 } 1521 1522 func (t *rtype) Comparable() bool { 1523 return t.equal != nil 1524 } 1525 1526 // implements reports whether the type V implements the interface type T. 1527 func implements(T, V *rtype) bool { 1528 if T.Kind() != Interface { 1529 return false 1530 } 1531 t := (*interfaceType)(unsafe.Pointer(T)) 1532 if len(t.methods) == 0 { 1533 return true 1534 } 1535 1536 // The same algorithm applies in both cases, but the 1537 // method tables for an interface type and a concrete type 1538 // are different, so the code is duplicated. 1539 // In both cases the algorithm is a linear scan over the two 1540 // lists - T's methods and V's methods - simultaneously. 
1541 // Since method tables are stored in a unique sorted order 1542 // (alphabetical, with no duplicate method names), the scan 1543 // through V's methods must hit a match for each of T's 1544 // methods along the way, or else V does not implement T. 1545 // This lets us run the scan in overall linear time instead of 1546 // the quadratic time a naive search would require. 1547 // See also ../runtime/iface.go. 1548 if V.Kind() == Interface { 1549 v := (*interfaceType)(unsafe.Pointer(V)) 1550 i := 0 1551 for j := 0; j < len(v.methods); j++ { 1552 tm := &t.methods[i] 1553 tmName := t.nameOff(tm.name) 1554 vm := &v.methods[j] 1555 vmName := V.nameOff(vm.name) 1556 if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { 1557 if !tmName.isExported() { 1558 tmPkgPath := tmName.pkgPath() 1559 if tmPkgPath == "" { 1560 tmPkgPath = t.pkgPath.name() 1561 } 1562 vmPkgPath := vmName.pkgPath() 1563 if vmPkgPath == "" { 1564 vmPkgPath = v.pkgPath.name() 1565 } 1566 if tmPkgPath != vmPkgPath { 1567 continue 1568 } 1569 } 1570 if i++; i >= len(t.methods) { 1571 return true 1572 } 1573 } 1574 } 1575 return false 1576 } 1577 1578 v := V.uncommon() 1579 if v == nil { 1580 return false 1581 } 1582 i := 0 1583 vmethods := v.methods() 1584 for j := 0; j < int(v.mcount); j++ { 1585 tm := &t.methods[i] 1586 tmName := t.nameOff(tm.name) 1587 vm := vmethods[j] 1588 vmName := V.nameOff(vm.name) 1589 if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) { 1590 if !tmName.isExported() { 1591 tmPkgPath := tmName.pkgPath() 1592 if tmPkgPath == "" { 1593 tmPkgPath = t.pkgPath.name() 1594 } 1595 vmPkgPath := vmName.pkgPath() 1596 if vmPkgPath == "" { 1597 vmPkgPath = V.nameOff(v.pkgPath).name() 1598 } 1599 if tmPkgPath != vmPkgPath { 1600 continue 1601 } 1602 } 1603 if i++; i >= len(t.methods) { 1604 return true 1605 } 1606 } 1607 } 1608 return false 1609 } 1610 1611 // specialChannelAssignability reports whether a value x of channel type V 1612 // can be directly assigned (using memmove) to another channel type T. 1613 // https://golang.org/doc/go_spec.html#Assignability 1614 // T and V must be both of Chan kind. 1615 func specialChannelAssignability(T, V *rtype) bool { 1616 // Special case: 1617 // x is a bidirectional channel value, T is a channel type, 1618 // x's type V and T have identical element types, 1619 // and at least one of V or T is not a defined type. 1620 return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true) 1621 } 1622 1623 // directlyAssignable reports whether a value x of type V can be directly 1624 // assigned (using memmove) to a value of type T. 1625 // https://golang.org/doc/go_spec.html#Assignability 1626 // Ignoring the interface rules (implemented elsewhere) 1627 // and the ideal constant rules (no ideal constants at run time). 1628 func directlyAssignable(T, V *rtype) bool { 1629 // x's type V is identical to T? 1630 if T == V { 1631 return true 1632 } 1633 1634 // Otherwise at least one of T and V must not be defined 1635 // and they must have the same kind. 1636 if T.hasName() && V.hasName() || T.Kind() != V.Kind() { 1637 return false 1638 } 1639 1640 if T.Kind() == Chan && specialChannelAssignability(T, V) { 1641 return true 1642 } 1643 1644 // x's type T and V must have identical underlying types. 
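	// For example, with `type IntSlice []int`, an IntSlice value is directly
	// assignable to a variable of type []int (and vice versa): []int is not a
	// defined type, the kinds match, and the underlying types are identical.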
1645 return haveIdenticalUnderlyingType(T, V, true) 1646 } 1647 1648 func haveIdenticalType(T, V Type, cmpTags bool) bool { 1649 if cmpTags { 1650 return T == V 1651 } 1652 1653 if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() { 1654 return false 1655 } 1656 1657 return haveIdenticalUnderlyingType(T.common(), V.common(), false) 1658 } 1659 1660 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool { 1661 if T == V { 1662 return true 1663 } 1664 1665 kind := T.Kind() 1666 if kind != V.Kind() { 1667 return false 1668 } 1669 1670 // Non-composite types of equal kind have same underlying type 1671 // (the predefined instance of the type). 1672 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer { 1673 return true 1674 } 1675 1676 // Composite types. 1677 switch kind { 1678 case Array: 1679 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1680 1681 case Chan: 1682 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1683 1684 case Func: 1685 t := (*funcType)(unsafe.Pointer(T)) 1686 v := (*funcType)(unsafe.Pointer(V)) 1687 if t.outCount != v.outCount || t.inCount != v.inCount { 1688 return false 1689 } 1690 for i := 0; i < t.NumIn(); i++ { 1691 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) { 1692 return false 1693 } 1694 } 1695 for i := 0; i < t.NumOut(); i++ { 1696 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) { 1697 return false 1698 } 1699 } 1700 return true 1701 1702 case Interface: 1703 t := (*interfaceType)(unsafe.Pointer(T)) 1704 v := (*interfaceType)(unsafe.Pointer(V)) 1705 if len(t.methods) == 0 && len(v.methods) == 0 { 1706 return true 1707 } 1708 // Might have the same methods but still 1709 // need a run time conversion. 1710 return false 1711 1712 case Map: 1713 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1714 1715 case Pointer, Slice: 1716 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1717 1718 case Struct: 1719 t := (*structType)(unsafe.Pointer(T)) 1720 v := (*structType)(unsafe.Pointer(V)) 1721 if len(t.fields) != len(v.fields) { 1722 return false 1723 } 1724 if t.pkgPath.name() != v.pkgPath.name() { 1725 return false 1726 } 1727 for i := range t.fields { 1728 tf := &t.fields[i] 1729 vf := &v.fields[i] 1730 if tf.name.name() != vf.name.name() { 1731 return false 1732 } 1733 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) { 1734 return false 1735 } 1736 if cmpTags && tf.name.tag() != vf.name.tag() { 1737 return false 1738 } 1739 if tf.offsetEmbed != vf.offsetEmbed { 1740 return false 1741 } 1742 } 1743 return true 1744 } 1745 1746 return false 1747 } 1748 1749 // typelinks is implemented in package runtime. 1750 // It returns a slice of the sections in each module, 1751 // and a slice of *rtype offsets in each module. 1752 // 1753 // The types in each module are sorted by string. That is, the first 1754 // two linked types of the first module are: 1755 // 1756 // d0 := sections[0] 1757 // t1 := (*rtype)(add(d0, offset[0][0])) 1758 // t2 := (*rtype)(add(d0, offset[0][1])) 1759 // 1760 // and 1761 // 1762 // t1.String() < t2.String() 1763 // 1764 // Note that strings are not unique identifiers for types: 1765 // there can be more than one with a given string. 1766 // Only types we might want to look up are included: 1767 // pointers, channels, maps, slices, and arrays. 
1768 func typelinks() (sections []unsafe.Pointer, offset [][]int32) 1769 1770 func rtypeOff(section unsafe.Pointer, off int32) *rtype { 1771 return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0")) 1772 } 1773 1774 // typesByString returns the subslice of typelinks() whose elements have 1775 // the given string representation. 1776 // It may be empty (no known types with that string) or may have 1777 // multiple elements (multiple types with that string). 1778 func typesByString(s string) []*rtype { 1779 sections, offset := typelinks() 1780 var ret []*rtype 1781 1782 for offsI, offs := range offset { 1783 section := sections[offsI] 1784 1785 // We are looking for the first index i where the string becomes >= s. 1786 // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). 1787 i, j := 0, len(offs) 1788 for i < j { 1789 h := i + (j-i)>>1 // avoid overflow when computing h 1790 // i ≤ h < j 1791 if !(rtypeOff(section, offs[h]).String() >= s) { 1792 i = h + 1 // preserves f(i-1) == false 1793 } else { 1794 j = h // preserves f(j) == true 1795 } 1796 } 1797 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 1798 1799 // Having found the first, linear scan forward to find the last. 1800 // We could do a second binary search, but the caller is going 1801 // to do a linear scan anyway. 1802 for j := i; j < len(offs); j++ { 1803 typ := rtypeOff(section, offs[j]) 1804 if typ.String() != s { 1805 break 1806 } 1807 ret = append(ret, typ) 1808 } 1809 } 1810 return ret 1811 } 1812 1813 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. 1814 var lookupCache sync.Map // map[cacheKey]*rtype 1815 1816 // A cacheKey is the key for use in the lookupCache. 1817 // Four values describe any of the types we are looking for: 1818 // type kind, one or two subtypes, and an extra integer. 1819 type cacheKey struct { 1820 kind Kind 1821 t1 *rtype 1822 t2 *rtype 1823 extra uintptr 1824 } 1825 1826 // The funcLookupCache caches FuncOf lookups. 1827 // FuncOf does not share the common lookupCache since cacheKey is not 1828 // sufficient to represent functions unambiguously. 1829 var funcLookupCache struct { 1830 sync.Mutex // Guards stores (but not loads) on m. 1831 1832 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. 1833 // Elements of m are append-only and thus safe for concurrent reading. 1834 m sync.Map 1835 } 1836 1837 // ChanOf returns the channel type with the given direction and element type. 1838 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. 1839 // 1840 // The gc runtime imposes a limit of 64 kB on channel element types. 1841 // If t's size is equal to or exceeds this limit, ChanOf panics. 1842 func ChanOf(dir ChanDir, t Type) Type { 1843 typ := t.(*rtype) 1844 1845 // Look in cache. 1846 ckey := cacheKey{Chan, typ, nil, uintptr(dir)} 1847 if ch, ok := lookupCache.Load(ckey); ok { 1848 return ch.(*rtype) 1849 } 1850 1851 // This restriction is imposed by the gc compiler and the runtime. 1852 if typ.size >= 1<<16 { 1853 panic("reflect.ChanOf: element size too large") 1854 } 1855 1856 // Look in known types. 
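	// s below is the canonical string form of the requested channel type
	// (for example "<-chan int"); typesByString is then consulted so that a
	// matching descriptor already linked into the binary can be reused
	// instead of constructing a new one.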
1857 var s string 1858 switch dir { 1859 default: 1860 panic("reflect.ChanOf: invalid dir") 1861 case SendDir: 1862 s = "chan<- " + typ.String() 1863 case RecvDir: 1864 s = "<-chan " + typ.String() 1865 case BothDir: 1866 typeStr := typ.String() 1867 if typeStr[0] == '<' { 1868 // typ is recv chan, need parentheses as "<-" associates with leftmost 1869 // chan possible, see: 1870 // * https://golang.org/ref/spec#Channel_types 1871 // * https://github.com/golang/go/issues/39897 1872 s = "chan (" + typeStr + ")" 1873 } else { 1874 s = "chan " + typeStr 1875 } 1876 } 1877 for _, tt := range typesByString(s) { 1878 ch := (*chanType)(unsafe.Pointer(tt)) 1879 if ch.elem == typ && ch.dir == uintptr(dir) { 1880 ti, _ := lookupCache.LoadOrStore(ckey, tt) 1881 return ti.(Type) 1882 } 1883 } 1884 1885 // Make a channel type. 1886 var ichan any = (chan unsafe.Pointer)(nil) 1887 prototype := *(**chanType)(unsafe.Pointer(&ichan)) 1888 ch := *prototype 1889 ch.tflag = tflagRegularMemory 1890 ch.dir = uintptr(dir) 1891 ch.str = resolveReflectName(newName(s, "", false)) 1892 ch.hash = fnv1(typ.hash, 'c', byte(dir)) 1893 ch.elem = typ 1894 1895 ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype) 1896 return ti.(Type) 1897 } 1898 1899 // MapOf returns the map type with the given key and element types. 1900 // For example, if k represents int and e represents string, 1901 // MapOf(k, e) represents map[int]string. 1902 // 1903 // If the key type is not a valid map key type (that is, if it does 1904 // not implement Go's == operator), MapOf panics. 1905 func MapOf(key, elem Type) Type { 1906 ktyp := key.(*rtype) 1907 etyp := elem.(*rtype) 1908 1909 if ktyp.equal == nil { 1910 panic("reflect.MapOf: invalid key type " + ktyp.String()) 1911 } 1912 1913 // Look in cache. 1914 ckey := cacheKey{Map, ktyp, etyp, 0} 1915 if mt, ok := lookupCache.Load(ckey); ok { 1916 return mt.(Type) 1917 } 1918 1919 // Look in known types. 1920 s := "map[" + ktyp.String() + "]" + etyp.String() 1921 for _, tt := range typesByString(s) { 1922 mt := (*mapType)(unsafe.Pointer(tt)) 1923 if mt.key == ktyp && mt.elem == etyp { 1924 ti, _ := lookupCache.LoadOrStore(ckey, tt) 1925 return ti.(Type) 1926 } 1927 } 1928 1929 // Make a map type. 1930 // Note: flag values must match those used in the TMAP case 1931 // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. 
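// Added illustrative summary (not part of the original source) of the flag
// bits populated below; per the note above, they must stay in sync with the
// compiler's TMAP flags:
//
//	1  // key is stored indirectly (key size > maxKeySize)
//	2  // value is stored indirectly (value size > maxValSize)
//	4  // key type is reflexive (k == k for all keys k)
//	8  // map overwrites require the key to be copied
//	16 // hashing the key might panic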
1932 var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil) 1933 mt := **(**mapType)(unsafe.Pointer(&imap)) 1934 mt.str = resolveReflectName(newName(s, "", false)) 1935 mt.tflag = 0 1936 mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash)) 1937 mt.key = ktyp 1938 mt.elem = etyp 1939 mt.bucket = bucketOf(ktyp, etyp) 1940 mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr { 1941 return typehash(ktyp, p, seed) 1942 } 1943 mt.flags = 0 1944 if ktyp.size > maxKeySize { 1945 mt.keysize = uint8(goarch.PtrSize) 1946 mt.flags |= 1 // indirect key 1947 } else { 1948 mt.keysize = uint8(ktyp.size) 1949 } 1950 if etyp.size > maxValSize { 1951 mt.valuesize = uint8(goarch.PtrSize) 1952 mt.flags |= 2 // indirect value 1953 } else { 1954 mt.valuesize = uint8(etyp.size) 1955 } 1956 mt.bucketsize = uint16(mt.bucket.size) 1957 if isReflexive(ktyp) { 1958 mt.flags |= 4 1959 } 1960 if needKeyUpdate(ktyp) { 1961 mt.flags |= 8 1962 } 1963 if hashMightPanic(ktyp) { 1964 mt.flags |= 16 1965 } 1966 mt.ptrToThis = 0 1967 1968 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype) 1969 return ti.(Type) 1970 } 1971 1972 // TODO(crawshaw): as these funcTypeFixedN structs have no methods, 1973 // they could be defined at runtime using the StructOf function. 1974 type funcTypeFixed4 struct { 1975 funcType 1976 args [4]*rtype 1977 } 1978 type funcTypeFixed8 struct { 1979 funcType 1980 args [8]*rtype 1981 } 1982 type funcTypeFixed16 struct { 1983 funcType 1984 args [16]*rtype 1985 } 1986 type funcTypeFixed32 struct { 1987 funcType 1988 args [32]*rtype 1989 } 1990 type funcTypeFixed64 struct { 1991 funcType 1992 args [64]*rtype 1993 } 1994 type funcTypeFixed128 struct { 1995 funcType 1996 args [128]*rtype 1997 } 1998 1999 // FuncOf returns the function type with the given argument and result types. 2000 // For example if k represents int and e represents string, 2001 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. 2002 // 2003 // The variadic argument controls whether the function is variadic. FuncOf 2004 // panics if the in[len(in)-1] does not represent a slice and variadic is 2005 // true. 2006 func FuncOf(in, out []Type, variadic bool) Type { 2007 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { 2008 panic("reflect.FuncOf: last arg of variadic func must be slice") 2009 } 2010 2011 // Make a func type. 2012 var ifunc any = (func())(nil) 2013 prototype := *(**funcType)(unsafe.Pointer(&ifunc)) 2014 n := len(in) + len(out) 2015 2016 var ft *funcType 2017 var args []*rtype 2018 switch { 2019 case n <= 4: 2020 fixed := new(funcTypeFixed4) 2021 args = fixed.args[:0:len(fixed.args)] 2022 ft = &fixed.funcType 2023 case n <= 8: 2024 fixed := new(funcTypeFixed8) 2025 args = fixed.args[:0:len(fixed.args)] 2026 ft = &fixed.funcType 2027 case n <= 16: 2028 fixed := new(funcTypeFixed16) 2029 args = fixed.args[:0:len(fixed.args)] 2030 ft = &fixed.funcType 2031 case n <= 32: 2032 fixed := new(funcTypeFixed32) 2033 args = fixed.args[:0:len(fixed.args)] 2034 ft = &fixed.funcType 2035 case n <= 64: 2036 fixed := new(funcTypeFixed64) 2037 args = fixed.args[:0:len(fixed.args)] 2038 ft = &fixed.funcType 2039 case n <= 128: 2040 fixed := new(funcTypeFixed128) 2041 args = fixed.args[:0:len(fixed.args)] 2042 ft = &fixed.funcType 2043 default: 2044 panic("reflect.FuncOf: too many arguments") 2045 } 2046 *ft = *prototype 2047 2048 // Build a hash and minimally populate ft. 
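// Added illustrative note (not part of the original source): the hash built
// below folds in each parameter type's hash, a 'v' marker when the function
// is variadic, a '.' separator, and then each result type's hash, so
// different signatures generally land in different funcLookupCache buckets
// (collisions are resolved by the identity check against each cached type).
// A hypothetical call such as
//
//	FuncOf([]Type{TypeOf(0)}, []Type{TypeOf("")}, false) // func(int) string
//
// reaches this point with in = [int] and out = [string].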
2049 var hash uint32 2050 for _, in := range in { 2051 t := in.(*rtype) 2052 args = append(args, t) 2053 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2054 } 2055 if variadic { 2056 hash = fnv1(hash, 'v') 2057 } 2058 hash = fnv1(hash, '.') 2059 for _, out := range out { 2060 t := out.(*rtype) 2061 args = append(args, t) 2062 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2063 } 2064 if len(args) > 50 { 2065 panic("reflect.FuncOf does not support more than 50 arguments") 2066 } 2067 ft.tflag = 0 2068 ft.hash = hash 2069 ft.inCount = uint16(len(in)) 2070 ft.outCount = uint16(len(out)) 2071 if variadic { 2072 ft.outCount |= 1 << 15 2073 } 2074 2075 // Look in cache. 2076 if ts, ok := funcLookupCache.m.Load(hash); ok { 2077 for _, t := range ts.([]*rtype) { 2078 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2079 return t 2080 } 2081 } 2082 } 2083 2084 // Not in cache, lock and retry. 2085 funcLookupCache.Lock() 2086 defer funcLookupCache.Unlock() 2087 if ts, ok := funcLookupCache.m.Load(hash); ok { 2088 for _, t := range ts.([]*rtype) { 2089 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2090 return t 2091 } 2092 } 2093 } 2094 2095 addToCache := func(tt *rtype) Type { 2096 var rts []*rtype 2097 if rti, ok := funcLookupCache.m.Load(hash); ok { 2098 rts = rti.([]*rtype) 2099 } 2100 funcLookupCache.m.Store(hash, append(rts, tt)) 2101 return tt 2102 } 2103 2104 // Look in known types for the same string representation. 2105 str := funcStr(ft) 2106 for _, tt := range typesByString(str) { 2107 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) { 2108 return addToCache(tt) 2109 } 2110 } 2111 2112 // Populate the remaining fields of ft and store in cache. 2113 ft.str = resolveReflectName(newName(str, "", false)) 2114 ft.ptrToThis = 0 2115 return addToCache(&ft.rtype) 2116 } 2117 2118 // funcStr builds a string representation of a funcType. 2119 func funcStr(ft *funcType) string { 2120 repr := make([]byte, 0, 64) 2121 repr = append(repr, "func("...) 2122 for i, t := range ft.in() { 2123 if i > 0 { 2124 repr = append(repr, ", "...) 2125 } 2126 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2127 repr = append(repr, "..."...) 2128 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2129 } else { 2130 repr = append(repr, t.String()...) 2131 } 2132 } 2133 repr = append(repr, ')') 2134 out := ft.out() 2135 if len(out) == 1 { 2136 repr = append(repr, ' ') 2137 } else if len(out) > 1 { 2138 repr = append(repr, " ("...) 2139 } 2140 for i, t := range out { 2141 if i > 0 { 2142 repr = append(repr, ", "...) 2143 } 2144 repr = append(repr, t.String()...) 2145 } 2146 if len(out) > 1 { 2147 repr = append(repr, ')') 2148 } 2149 return string(repr) 2150 } 2151 2152 // isReflexive reports whether the == operation on the type is reflexive. 2153 // That is, x == x for all values x of type t. 
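// Added illustrative example (not part of the original source): NaN is why
// float, complex, and interface keys are not reflexive. Using package math:
//
//	nan := math.NaN()
//	m := map[float64]bool{}
//	m[nan] = true
//	_, ok := m[nan] // ok is false, because nan != nan
//
// The map implementation needs to know about such keys, hence this helper.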
func isReflexive(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
		return true
	case Float32, Float64, Complex64, Complex128, Interface:
		return false
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return isReflexive(tt.elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if !isReflexive(f.typ) {
				return false
			}
		}
		return true
	default:
		// Func, Map, Slice, Invalid
		panic("isReflexive called on non-key type " + t.String())
	}
}

// needKeyUpdate reports whether map overwrites require the key to be copied.
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}

// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *rtype) bool {
	switch t.Kind() {
	case Interface:
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return hashMightPanic(tt.elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if hashMightPanic(f.typ) {
				return true
			}
		}
		return false
	default:
		return false
	}
}

// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8
	maxKeySize uintptr = 128
	maxValSize uintptr = 128
)

func bucketOf(ktyp, etyp *rtype) *rtype {
	if ktyp.size > maxKeySize {
		ktyp = PointerTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PointerTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
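// Added worked arithmetic for the figures above (assuming 8-byte pointers):
//
//	bucketSize*(1+maxKeySize+maxValSize) + 2*ptrSize
//	    = 8*(1+128+128) + 2*8
//	    = 2056 + 16
//	    = 2072 bytes
//	2072 / 8      = 259 pointer-size words
//	ceil(259 / 8) = 33 bytes of pointer bitmap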
2248 var gcdata *byte 2249 var ptrdata uintptr 2250 var overflowPad uintptr 2251 2252 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + goarch.PtrSize 2253 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 { 2254 panic("reflect: bad size computation in MapOf") 2255 } 2256 2257 if ktyp.ptrdata != 0 || etyp.ptrdata != 0 { 2258 nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize 2259 mask := make([]byte, (nptr+7)/8) 2260 base := bucketSize / goarch.PtrSize 2261 2262 if ktyp.ptrdata != 0 { 2263 emitGCMask(mask, base, ktyp, bucketSize) 2264 } 2265 base += bucketSize * ktyp.size / goarch.PtrSize 2266 2267 if etyp.ptrdata != 0 { 2268 emitGCMask(mask, base, etyp, bucketSize) 2269 } 2270 base += bucketSize * etyp.size / goarch.PtrSize 2271 base += overflowPad / goarch.PtrSize 2272 2273 word := base 2274 mask[word/8] |= 1 << (word % 8) 2275 gcdata = &mask[0] 2276 ptrdata = (word + 1) * goarch.PtrSize 2277 2278 // overflow word must be last 2279 if ptrdata != size { 2280 panic("reflect: bad layout computation in MapOf") 2281 } 2282 } 2283 2284 b := &rtype{ 2285 align: goarch.PtrSize, 2286 size: size, 2287 kind: uint8(Struct), 2288 ptrdata: ptrdata, 2289 gcdata: gcdata, 2290 } 2291 if overflowPad > 0 { 2292 b.align = 8 2293 } 2294 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")" 2295 b.str = resolveReflectName(newName(s, "", false)) 2296 return b 2297 } 2298 2299 func (t *rtype) gcSlice(begin, end uintptr) []byte { 2300 return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end] 2301 } 2302 2303 // emitGCMask writes the GC mask for [n]typ into out, starting at bit 2304 // offset base. 2305 func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) { 2306 if typ.kind&kindGCProg != 0 { 2307 panic("reflect: unexpected GC program") 2308 } 2309 ptrs := typ.ptrdata / goarch.PtrSize 2310 words := typ.size / goarch.PtrSize 2311 mask := typ.gcSlice(0, (ptrs+7)/8) 2312 for j := uintptr(0); j < ptrs; j++ { 2313 if (mask[j/8]>>(j%8))&1 != 0 { 2314 for i := uintptr(0); i < n; i++ { 2315 k := base + i*words + j 2316 out[k/8] |= 1 << (k % 8) 2317 } 2318 } 2319 } 2320 } 2321 2322 // appendGCProg appends the GC program for the first ptrdata bytes of 2323 // typ to dst and returns the extended slice. 2324 func appendGCProg(dst []byte, typ *rtype) []byte { 2325 if typ.kind&kindGCProg != 0 { 2326 // Element has GC program; emit one element. 2327 n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata))) 2328 prog := typ.gcSlice(4, 4+n-1) 2329 return append(dst, prog...) 2330 } 2331 2332 // Element is small with pointer mask; use as literal bits. 2333 ptrs := typ.ptrdata / goarch.PtrSize 2334 mask := typ.gcSlice(0, (ptrs+7)/8) 2335 2336 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2337 for ; ptrs > 120; ptrs -= 120 { 2338 dst = append(dst, 120) 2339 dst = append(dst, mask[:15]...) 2340 mask = mask[15:] 2341 } 2342 2343 dst = append(dst, byte(ptrs)) 2344 dst = append(dst, mask...) 2345 return dst 2346 } 2347 2348 // SliceOf returns the slice type with element type t. 2349 // For example, if t represents int, SliceOf(t) represents []int. 2350 func SliceOf(t Type) Type { 2351 typ := t.(*rtype) 2352 2353 // Look in cache. 2354 ckey := cacheKey{Slice, typ, nil, 0} 2355 if slice, ok := lookupCache.Load(ckey); ok { 2356 return slice.(Type) 2357 } 2358 2359 // Look in known types. 
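// Added illustrative note (not part of the original source): the lookup below
// builds the canonical slice string and consults typesByString, e.g.
//
//	SliceOf(TypeOf(0))  // looks up "[]int"
//	SliceOf(TypeOf("")) // looks up "[]string"
//
// Only if no linker-known type matches is a fresh sliceType constructed.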
2360 s := "[]" + typ.String() 2361 for _, tt := range typesByString(s) { 2362 slice := (*sliceType)(unsafe.Pointer(tt)) 2363 if slice.elem == typ { 2364 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2365 return ti.(Type) 2366 } 2367 } 2368 2369 // Make a slice type. 2370 var islice any = ([]unsafe.Pointer)(nil) 2371 prototype := *(**sliceType)(unsafe.Pointer(&islice)) 2372 slice := *prototype 2373 slice.tflag = 0 2374 slice.str = resolveReflectName(newName(s, "", false)) 2375 slice.hash = fnv1(typ.hash, '[') 2376 slice.elem = typ 2377 slice.ptrToThis = 0 2378 2379 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype) 2380 return ti.(Type) 2381 } 2382 2383 // The structLookupCache caches StructOf lookups. 2384 // StructOf does not share the common lookupCache since we need to pin 2385 // the memory associated with *structTypeFixedN. 2386 var structLookupCache struct { 2387 sync.Mutex // Guards stores (but not loads) on m. 2388 2389 // m is a map[uint32][]Type keyed by the hash calculated in StructOf. 2390 // Elements in m are append-only and thus safe for concurrent reading. 2391 m sync.Map 2392 } 2393 2394 type structTypeUncommon struct { 2395 structType 2396 u uncommonType 2397 } 2398 2399 // isLetter reports whether a given 'rune' is classified as a Letter. 2400 func isLetter(ch rune) bool { 2401 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) 2402 } 2403 2404 // isValidFieldName checks if a string is a valid (struct) field name or not. 2405 // 2406 // According to the language spec, a field name should be an identifier. 2407 // 2408 // identifier = letter { letter | unicode_digit } . 2409 // letter = unicode_letter | "_" . 2410 func isValidFieldName(fieldName string) bool { 2411 for i, c := range fieldName { 2412 if i == 0 && !isLetter(c) { 2413 return false 2414 } 2415 2416 if !(isLetter(c) || unicode.IsDigit(c)) { 2417 return false 2418 } 2419 } 2420 2421 return len(fieldName) > 0 2422 } 2423 2424 // StructOf returns the struct type containing fields. 2425 // The Offset and Index fields are ignored and computed as they would be 2426 // by the compiler. 2427 // 2428 // StructOf currently does not generate wrapper methods for embedded 2429 // fields and panics if passed unexported StructFields. 2430 // These limitations may be lifted in a future version. 2431 func StructOf(fields []StructField) Type { 2432 var ( 2433 hash = fnv1(0, []byte("struct {")...) 2434 size uintptr 2435 typalign uint8 2436 comparable = true 2437 methods []method 2438 2439 fs = make([]structField, len(fields)) 2440 repr = make([]byte, 0, 64) 2441 fset = map[string]struct{}{} // fields' names 2442 2443 hasGCProg = false // records whether a struct-field type has a GCProg 2444 ) 2445 2446 lastzero := uintptr(0) 2447 repr = append(repr, "struct {"...) 
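// Added illustrative example (not part of the original source): a hypothetical
// caller building struct { Name string `json:"name"`; Age int } at run time:
//
//	t := StructOf([]StructField{
//		{Name: "Name", Type: TypeOf(""), Tag: `json:"name"`},
//		{Name: "Age", Type: TypeOf(0)},
//	})
//
// The loop below validates each such field, folds it into the hash and string
// representation, and computes offsets the way the compiler would.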
2448 pkgpath := "" 2449 for i, field := range fields { 2450 if field.Name == "" { 2451 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2452 } 2453 if !isValidFieldName(field.Name) { 2454 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2455 } 2456 if field.Type == nil { 2457 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2458 } 2459 f, fpkgpath := runtimeStructField(field) 2460 ft := f.typ 2461 if ft.kind&kindGCProg != 0 { 2462 hasGCProg = true 2463 } 2464 if fpkgpath != "" { 2465 if pkgpath == "" { 2466 pkgpath = fpkgpath 2467 } else if pkgpath != fpkgpath { 2468 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) 2469 } 2470 } 2471 2472 // Update string and hash 2473 name := f.name.name() 2474 hash = fnv1(hash, []byte(name)...) 2475 repr = append(repr, (" " + name)...) 2476 if f.embedded() { 2477 // Embedded field 2478 if f.typ.Kind() == Pointer { 2479 // Embedded ** and *interface{} are illegal 2480 elem := ft.Elem() 2481 if k := elem.Kind(); k == Pointer || k == Interface { 2482 panic("reflect.StructOf: illegal embedded field type " + ft.String()) 2483 } 2484 } 2485 2486 switch f.typ.Kind() { 2487 case Interface: 2488 ift := (*interfaceType)(unsafe.Pointer(ft)) 2489 for im, m := range ift.methods { 2490 if ift.nameOff(m.name).pkgPath() != "" { 2491 // TODO(sbinet). Issue 15924. 2492 panic("reflect: embedded interface with unexported method(s) not implemented") 2493 } 2494 2495 var ( 2496 mtyp = ift.typeOff(m.typ) 2497 ifield = i 2498 imethod = im 2499 ifn Value 2500 tfn Value 2501 ) 2502 2503 if ft.kind&kindDirectIface != 0 { 2504 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2505 var args []Value 2506 var recv = in[0] 2507 if len(in) > 1 { 2508 args = in[1:] 2509 } 2510 return recv.Field(ifield).Method(imethod).Call(args) 2511 }) 2512 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2513 var args []Value 2514 var recv = in[0] 2515 if len(in) > 1 { 2516 args = in[1:] 2517 } 2518 return recv.Field(ifield).Method(imethod).Call(args) 2519 }) 2520 } else { 2521 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2522 var args []Value 2523 var recv = in[0] 2524 if len(in) > 1 { 2525 args = in[1:] 2526 } 2527 return recv.Field(ifield).Method(imethod).Call(args) 2528 }) 2529 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2530 var args []Value 2531 var recv = Indirect(in[0]) 2532 if len(in) > 1 { 2533 args = in[1:] 2534 } 2535 return recv.Field(ifield).Method(imethod).Call(args) 2536 }) 2537 } 2538 2539 methods = append(methods, method{ 2540 name: resolveReflectName(ift.nameOff(m.name)), 2541 mtyp: resolveReflectType(mtyp), 2542 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2543 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2544 }) 2545 } 2546 case Pointer: 2547 ptr := (*ptrType)(unsafe.Pointer(ft)) 2548 if unt := ptr.uncommon(); unt != nil { 2549 if i > 0 && unt.mcount > 0 { 2550 // Issue 15924. 2551 panic("reflect: embedded type with methods not implemented if type is not first field") 2552 } 2553 if len(fields) > 1 { 2554 panic("reflect: embedded type with methods not implemented if there is more than one field") 2555 } 2556 for _, m := range unt.methods() { 2557 mname := ptr.nameOff(m.name) 2558 if mname.pkgPath() != "" { 2559 // TODO(sbinet). 2560 // Issue 15924. 
2561 panic("reflect: embedded interface with unexported method(s) not implemented") 2562 } 2563 methods = append(methods, method{ 2564 name: resolveReflectName(mname), 2565 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2566 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2567 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2568 }) 2569 } 2570 } 2571 if unt := ptr.elem.uncommon(); unt != nil { 2572 for _, m := range unt.methods() { 2573 mname := ptr.nameOff(m.name) 2574 if mname.pkgPath() != "" { 2575 // TODO(sbinet) 2576 // Issue 15924. 2577 panic("reflect: embedded interface with unexported method(s) not implemented") 2578 } 2579 methods = append(methods, method{ 2580 name: resolveReflectName(mname), 2581 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2582 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2583 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2584 }) 2585 } 2586 } 2587 default: 2588 if unt := ft.uncommon(); unt != nil { 2589 if i > 0 && unt.mcount > 0 { 2590 // Issue 15924. 2591 panic("reflect: embedded type with methods not implemented if type is not first field") 2592 } 2593 if len(fields) > 1 && ft.kind&kindDirectIface != 0 { 2594 panic("reflect: embedded type with methods not implemented for non-pointer type") 2595 } 2596 for _, m := range unt.methods() { 2597 mname := ft.nameOff(m.name) 2598 if mname.pkgPath() != "" { 2599 // TODO(sbinet) 2600 // Issue 15924. 2601 panic("reflect: embedded interface with unexported method(s) not implemented") 2602 } 2603 methods = append(methods, method{ 2604 name: resolveReflectName(mname), 2605 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2606 ifn: resolveReflectText(ft.textOff(m.ifn)), 2607 tfn: resolveReflectText(ft.textOff(m.tfn)), 2608 }) 2609 2610 } 2611 } 2612 } 2613 } 2614 if _, dup := fset[name]; dup && name != "_" { 2615 panic("reflect.StructOf: duplicate field " + name) 2616 } 2617 fset[name] = struct{}{} 2618 2619 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2620 2621 repr = append(repr, (" " + ft.String())...) 2622 if f.name.hasTag() { 2623 hash = fnv1(hash, []byte(f.name.tag())...) 2624 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2625 } 2626 if i < len(fields)-1 { 2627 repr = append(repr, ';') 2628 } 2629 2630 comparable = comparable && (ft.equal != nil) 2631 2632 offset := align(size, uintptr(ft.align)) 2633 if ft.align > typalign { 2634 typalign = ft.align 2635 } 2636 size = offset + ft.size 2637 f.offsetEmbed |= offset << 1 2638 2639 if ft.size == 0 { 2640 lastzero = size 2641 } 2642 2643 fs[i] = f 2644 } 2645 2646 if size > 0 && lastzero == size { 2647 // This is a non-zero sized struct that ends in a 2648 // zero-sized field. We add an extra byte of padding, 2649 // to ensure that taking the address of the final 2650 // zero-sized field can't manufacture a pointer to the 2651 // next object in the heap. See issue 9401. 2652 size++ 2653 } 2654 2655 var typ *structType 2656 var ut *uncommonType 2657 2658 if len(methods) == 0 { 2659 t := new(structTypeUncommon) 2660 typ = &t.structType 2661 ut = &t.u 2662 } else { 2663 // A *rtype representing a struct is followed directly in memory by an 2664 // array of method objects representing the methods attached to the 2665 // struct. To get the same layout for a run time generated type, we 2666 // need an array directly following the uncommonType memory. 2667 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 
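// Added illustrative note (not part of the original source): the synthetic
// struct allocated below keeps all three pieces in one contiguous block,
//
//	struct {
//		S structType           // the rtype proper
//		U uncommonType         // method count and offset
//		M [len(methods)]method // method table
//	}
//
// so the method table ends up uncommonType.moff bytes past U, matching the
// layout the compiler emits for declared types with methods.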
2668 tt := New(StructOf([]StructField{ 2669 {Name: "S", Type: TypeOf(structType{})}, 2670 {Name: "U", Type: TypeOf(uncommonType{})}, 2671 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, 2672 })) 2673 2674 typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer()) 2675 ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer()) 2676 2677 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods) 2678 } 2679 // TODO(sbinet): Once we allow embedding multiple types, 2680 // methods will need to be sorted like the compiler does. 2681 // TODO(sbinet): Once we allow non-exported methods, we will 2682 // need to compute xcount as the number of exported methods. 2683 ut.mcount = uint16(len(methods)) 2684 ut.xcount = ut.mcount 2685 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2686 2687 if len(fs) > 0 { 2688 repr = append(repr, ' ') 2689 } 2690 repr = append(repr, '}') 2691 hash = fnv1(hash, '}') 2692 str := string(repr) 2693 2694 // Round the size up to be a multiple of the alignment. 2695 size = align(size, uintptr(typalign)) 2696 2697 // Make the struct type. 2698 var istruct any = struct{}{} 2699 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2700 *typ = *prototype 2701 typ.fields = fs 2702 if pkgpath != "" { 2703 typ.pkgPath = newName(pkgpath, "", false) 2704 } 2705 2706 // Look in cache. 2707 if ts, ok := structLookupCache.m.Load(hash); ok { 2708 for _, st := range ts.([]Type) { 2709 t := st.common() 2710 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2711 return t 2712 } 2713 } 2714 } 2715 2716 // Not in cache, lock and retry. 2717 structLookupCache.Lock() 2718 defer structLookupCache.Unlock() 2719 if ts, ok := structLookupCache.m.Load(hash); ok { 2720 for _, st := range ts.([]Type) { 2721 t := st.common() 2722 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2723 return t 2724 } 2725 } 2726 } 2727 2728 addToCache := func(t Type) Type { 2729 var ts []Type 2730 if ti, ok := structLookupCache.m.Load(hash); ok { 2731 ts = ti.([]Type) 2732 } 2733 structLookupCache.m.Store(hash, append(ts, t)) 2734 return t 2735 } 2736 2737 // Look in known types. 2738 for _, t := range typesByString(str) { 2739 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2740 // even if 't' wasn't a structType with methods, we should be ok 2741 // as the 'u uncommonType' field won't be accessed except when 2742 // tflag&tflagUncommon is set. 2743 return addToCache(t) 2744 } 2745 } 2746 2747 typ.str = resolveReflectName(newName(str, "", false)) 2748 typ.tflag = 0 // TODO: set tflagRegularMemory 2749 typ.hash = hash 2750 typ.size = size 2751 typ.ptrdata = typeptrdata(typ.common()) 2752 typ.align = typalign 2753 typ.fieldAlign = typalign 2754 typ.ptrToThis = 0 2755 if len(methods) > 0 { 2756 typ.tflag |= tflagUncommon 2757 } 2758 2759 if hasGCProg { 2760 lastPtrField := 0 2761 for i, ft := range fs { 2762 if ft.typ.pointers() { 2763 lastPtrField = i 2764 } 2765 } 2766 prog := []byte{0, 0, 0, 0} // will be length of prog 2767 var off uintptr 2768 for i, ft := range fs { 2769 if i > lastPtrField { 2770 // gcprog should not include anything for any field after 2771 // the last field that contains pointer data 2772 break 2773 } 2774 if !ft.typ.pointers() { 2775 // Ignore pointerless fields. 2776 continue 2777 } 2778 // Pad to start of this field with zeros. 
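// Added note on the program encoding used below (not part of the original
// source): in this byte stream, 0x01 0x00 emits a single literal 0 bit,
// 0x81 followed by a varint n repeats the previous bit n more times, and a
// trailing 0 terminates the program; the first four bytes are later
// overwritten with the program's length (len(prog) - 4).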
2779 if ft.offset() > off { 2780 n := (ft.offset() - off) / goarch.PtrSize 2781 prog = append(prog, 0x01, 0x00) // emit a 0 bit 2782 if n > 1 { 2783 prog = append(prog, 0x81) // repeat previous bit 2784 prog = appendVarint(prog, n-1) // n-1 times 2785 } 2786 off = ft.offset() 2787 } 2788 2789 prog = appendGCProg(prog, ft.typ) 2790 off += ft.typ.ptrdata 2791 } 2792 prog = append(prog, 0) 2793 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2794 typ.kind |= kindGCProg 2795 typ.gcdata = &prog[0] 2796 } else { 2797 typ.kind &^= kindGCProg 2798 bv := new(bitVector) 2799 addTypeBits(bv, 0, typ.common()) 2800 if len(bv.data) > 0 { 2801 typ.gcdata = &bv.data[0] 2802 } 2803 } 2804 typ.equal = nil 2805 if comparable { 2806 typ.equal = func(p, q unsafe.Pointer) bool { 2807 for _, ft := range typ.fields { 2808 pi := add(p, ft.offset(), "&x.field safe") 2809 qi := add(q, ft.offset(), "&x.field safe") 2810 if !ft.typ.equal(pi, qi) { 2811 return false 2812 } 2813 } 2814 return true 2815 } 2816 } 2817 2818 switch { 2819 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2820 // structs of 1 direct iface type can be direct 2821 typ.kind |= kindDirectIface 2822 default: 2823 typ.kind &^= kindDirectIface 2824 } 2825 2826 return addToCache(&typ.rtype) 2827 } 2828 2829 // runtimeStructField takes a StructField value passed to StructOf and 2830 // returns both the corresponding internal representation, of type 2831 // structField, and the pkgpath value to use for this field. 2832 func runtimeStructField(field StructField) (structField, string) { 2833 if field.Anonymous && field.PkgPath != "" { 2834 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") 2835 } 2836 2837 if field.IsExported() { 2838 // Best-effort check for misuse. 2839 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. 2840 c := field.Name[0] 2841 if 'a' <= c && c <= 'z' || c == '_' { 2842 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2843 } 2844 } 2845 2846 offsetEmbed := uintptr(0) 2847 if field.Anonymous { 2848 offsetEmbed |= 1 2849 } 2850 2851 resolveReflectType(field.Type.common()) // install in runtime 2852 f := structField{ 2853 name: newName(field.Name, string(field.Tag), field.IsExported()), 2854 typ: field.Type.common(), 2855 offsetEmbed: offsetEmbed, 2856 } 2857 return f, field.PkgPath 2858 } 2859 2860 // typeptrdata returns the length in bytes of the prefix of t 2861 // containing pointer data. Anything after this offset is scalar data. 2862 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go 2863 func typeptrdata(t *rtype) uintptr { 2864 switch t.Kind() { 2865 case Struct: 2866 st := (*structType)(unsafe.Pointer(t)) 2867 // find the last field that has pointers. 2868 field := -1 2869 for i := range st.fields { 2870 ft := st.fields[i].typ 2871 if ft.pointers() { 2872 field = i 2873 } 2874 } 2875 if field == -1 { 2876 return 0 2877 } 2878 f := st.fields[field] 2879 return f.offset() + f.typ.ptrdata 2880 2881 default: 2882 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2883 } 2884 } 2885 2886 // See cmd/compile/internal/reflectdata/reflect.go for derivation of constant. 2887 const maxPtrmaskBytes = 2048 2888 2889 // ArrayOf returns the array type with the given length and element type. 2890 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2891 // 2892 // If the resulting type would be larger than the available address space, 2893 // ArrayOf panics. 
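// Added illustrative examples (not part of the original source):
//
//	ArrayOf(5, TypeOf(0))  // represents [5]int
//	ArrayOf(0, TypeOf("")) // represents [0]string
//
// Results are cached in lookupCache keyed by (Array, element type, length).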
2894 func ArrayOf(length int, elem Type) Type { 2895 if length < 0 { 2896 panic("reflect: negative length passed to ArrayOf") 2897 } 2898 2899 typ := elem.(*rtype) 2900 2901 // Look in cache. 2902 ckey := cacheKey{Array, typ, nil, uintptr(length)} 2903 if array, ok := lookupCache.Load(ckey); ok { 2904 return array.(Type) 2905 } 2906 2907 // Look in known types. 2908 s := "[" + strconv.Itoa(length) + "]" + typ.String() 2909 for _, tt := range typesByString(s) { 2910 array := (*arrayType)(unsafe.Pointer(tt)) 2911 if array.elem == typ { 2912 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2913 return ti.(Type) 2914 } 2915 } 2916 2917 // Make an array type. 2918 var iarray any = [1]unsafe.Pointer{} 2919 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2920 array := *prototype 2921 array.tflag = typ.tflag & tflagRegularMemory 2922 array.str = resolveReflectName(newName(s, "", false)) 2923 array.hash = fnv1(typ.hash, '[') 2924 for n := uint32(length); n > 0; n >>= 8 { 2925 array.hash = fnv1(array.hash, byte(n)) 2926 } 2927 array.hash = fnv1(array.hash, ']') 2928 array.elem = typ 2929 array.ptrToThis = 0 2930 if typ.size > 0 { 2931 max := ^uintptr(0) / typ.size 2932 if uintptr(length) > max { 2933 panic("reflect.ArrayOf: array size would exceed virtual address space") 2934 } 2935 } 2936 array.size = typ.size * uintptr(length) 2937 if length > 0 && typ.ptrdata != 0 { 2938 array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata 2939 } 2940 array.align = typ.align 2941 array.fieldAlign = typ.fieldAlign 2942 array.len = uintptr(length) 2943 array.slice = SliceOf(elem).(*rtype) 2944 2945 switch { 2946 case typ.ptrdata == 0 || array.size == 0: 2947 // No pointers. 2948 array.gcdata = nil 2949 array.ptrdata = 0 2950 2951 case length == 1: 2952 // In memory, 1-element array looks just like the element. 2953 array.kind |= typ.kind & kindGCProg 2954 array.gcdata = typ.gcdata 2955 array.ptrdata = typ.ptrdata 2956 2957 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize: 2958 // Element is small with pointer mask; array is still small. 2959 // Create direct pointer mask by turning each 1 bit in elem 2960 // into length 1 bits in larger mask. 2961 mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8) 2962 emitGCMask(mask, 0, typ, array.len) 2963 array.gcdata = &mask[0] 2964 2965 default: 2966 // Create program that emits one element 2967 // and then repeats to make the array. 2968 prog := []byte{0, 0, 0, 0} // will be length of prog 2969 prog = appendGCProg(prog, typ) 2970 // Pad from ptrdata to size. 2971 elemPtrs := typ.ptrdata / goarch.PtrSize 2972 elemWords := typ.size / goarch.PtrSize 2973 if elemPtrs < elemWords { 2974 // Emit literal 0 bit, then repeat as needed. 2975 prog = append(prog, 0x01, 0x00) 2976 if elemPtrs+1 < elemWords { 2977 prog = append(prog, 0x81) 2978 prog = appendVarint(prog, elemWords-elemPtrs-1) 2979 } 2980 } 2981 // Repeat length-1 times. 
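// Added note (not part of the original source): the bytes appended below form
// a repeat instruction: 0x80|n (or 0x80 followed by a varint when n >= 0x80)
// means "repeat the last n bits", and the next varint is the repeat count,
// here length-1, so the element's pointer bitmap is stamped out once per
// remaining array element.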
2982 if elemWords < 0x80 { 2983 prog = append(prog, byte(elemWords|0x80)) 2984 } else { 2985 prog = append(prog, 0x80) 2986 prog = appendVarint(prog, elemWords) 2987 } 2988 prog = appendVarint(prog, uintptr(length)-1) 2989 prog = append(prog, 0) 2990 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2991 array.kind |= kindGCProg 2992 array.gcdata = &prog[0] 2993 array.ptrdata = array.size // overestimate but ok; must match program 2994 } 2995 2996 etyp := typ.common() 2997 esize := etyp.Size() 2998 2999 array.equal = nil 3000 if eequal := etyp.equal; eequal != nil { 3001 array.equal = func(p, q unsafe.Pointer) bool { 3002 for i := 0; i < length; i++ { 3003 pi := arrayAt(p, i, esize, "i < length") 3004 qi := arrayAt(q, i, esize, "i < length") 3005 if !eequal(pi, qi) { 3006 return false 3007 } 3008 3009 } 3010 return true 3011 } 3012 } 3013 3014 switch { 3015 case length == 1 && !ifaceIndir(typ): 3016 // array of 1 direct iface type can be direct 3017 array.kind |= kindDirectIface 3018 default: 3019 array.kind &^= kindDirectIface 3020 } 3021 3022 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype) 3023 return ti.(Type) 3024 } 3025 3026 func appendVarint(x []byte, v uintptr) []byte { 3027 for ; v >= 0x80; v >>= 7 { 3028 x = append(x, byte(v|0x80)) 3029 } 3030 x = append(x, byte(v)) 3031 return x 3032 } 3033 3034 // toType converts from a *rtype to a Type that can be returned 3035 // to the client of package reflect. In gc, the only concern is that 3036 // a nil *rtype must be replaced by a nil Type, but in gccgo this 3037 // function takes care of ensuring that multiple *rtype for the same 3038 // type are coalesced into a single Type. 3039 func toType(t *rtype) Type { 3040 if t == nil { 3041 return nil 3042 } 3043 return t 3044 } 3045 3046 type layoutKey struct { 3047 ftyp *funcType // function signature 3048 rcvr *rtype // receiver type, or nil if none 3049 } 3050 3051 type layoutType struct { 3052 t *rtype 3053 framePool *sync.Pool 3054 abi abiDesc 3055 } 3056 3057 var layoutCache sync.Map // map[layoutKey]layoutType 3058 3059 // funcLayout computes a struct type representing the layout of the 3060 // stack-assigned function arguments and return values for the function 3061 // type t. 3062 // If rcvr != nil, rcvr specifies the type of the receiver. 3063 // The returned type exists only for GC, so we only fill out GC relevant info. 3064 // Currently, that's just size and the GC program. We also fill in 3065 // the name for possible debugging use. 3066 func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) { 3067 if t.Kind() != Func { 3068 panic("reflect: funcLayout of non-func type " + t.String()) 3069 } 3070 if rcvr != nil && rcvr.Kind() == Interface { 3071 panic("reflect: funcLayout with interface receiver " + rcvr.String()) 3072 } 3073 k := layoutKey{t, rcvr} 3074 if lti, ok := layoutCache.Load(k); ok { 3075 lt := lti.(layoutType) 3076 return lt.t, lt.framePool, lt.abi 3077 } 3078 3079 // Compute the ABI layout. 3080 abi = newAbiDesc(t, rcvr) 3081 3082 // build dummy rtype holding gc program 3083 x := &rtype{ 3084 align: goarch.PtrSize, 3085 // Don't add spill space here; it's only necessary in 3086 // reflectcall's frame, not in the allocated frame. 3087 // TODO(mknyszek): Remove this comment when register 3088 // spill space in the frame is no longer required. 
3089 size: align(abi.retOffset+abi.ret.stackBytes, goarch.PtrSize), 3090 ptrdata: uintptr(abi.stackPtrs.n) * goarch.PtrSize, 3091 } 3092 if abi.stackPtrs.n > 0 { 3093 x.gcdata = &abi.stackPtrs.data[0] 3094 } 3095 3096 var s string 3097 if rcvr != nil { 3098 s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")" 3099 } else { 3100 s = "funcargs(" + t.String() + ")" 3101 } 3102 x.str = resolveReflectName(newName(s, "", false)) 3103 3104 // cache result for future callers 3105 framePool = &sync.Pool{New: func() any { 3106 return unsafe_New(x) 3107 }} 3108 lti, _ := layoutCache.LoadOrStore(k, layoutType{ 3109 t: x, 3110 framePool: framePool, 3111 abi: abi, 3112 }) 3113 lt := lti.(layoutType) 3114 return lt.t, lt.framePool, lt.abi 3115 } 3116 3117 // ifaceIndir reports whether t is stored indirectly in an interface value. 3118 func ifaceIndir(t *rtype) bool { 3119 return t.kind&kindDirectIface == 0 3120 } 3121 3122 // Note: this type must agree with runtime.bitvector. 3123 type bitVector struct { 3124 n uint32 // number of bits 3125 data []byte 3126 } 3127 3128 // append a bit to the bitmap. 3129 func (bv *bitVector) append(bit uint8) { 3130 if bv.n%8 == 0 { 3131 bv.data = append(bv.data, 0) 3132 } 3133 bv.data[bv.n/8] |= bit << (bv.n % 8) 3134 bv.n++ 3135 } 3136 3137 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3138 if t.ptrdata == 0 { 3139 return 3140 } 3141 3142 switch Kind(t.kind & kindMask) { 3143 case Chan, Func, Map, Pointer, Slice, String, UnsafePointer: 3144 // 1 pointer at start of representation 3145 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { 3146 bv.append(0) 3147 } 3148 bv.append(1) 3149 3150 case Interface: 3151 // 2 pointers 3152 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { 3153 bv.append(0) 3154 } 3155 bv.append(1) 3156 bv.append(1) 3157 3158 case Array: 3159 // repeat inner type 3160 tt := (*arrayType)(unsafe.Pointer(t)) 3161 for i := 0; i < int(tt.len); i++ { 3162 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3163 } 3164 3165 case Struct: 3166 // apply fields 3167 tt := (*structType)(unsafe.Pointer(t)) 3168 for i := range tt.fields { 3169 f := &tt.fields[i] 3170 addTypeBits(bv, offset+f.offset(), f.typ) 3171 } 3172 } 3173 }
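// Added illustrative end-of-file sketch (not part of the original source): a
// hypothetical caller composing the run-time type constructors defined in
// this file; MapOf, SliceOf, and ArrayOf share lookupCache, while FuncOf uses
// funcLookupCache:
//
//	k, e := TypeOf(""), TypeOf(0)
//	m := MapOf(k, e)   // map[string]int
//	s := SliceOf(m)    // []map[string]int
//	a := ArrayOf(4, s) // [4][]map[string]int
//	f := FuncOf([]Type{a}, []Type{TypeOf(true)}, false) // func([4][]map[string]int) bool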