github.com/goplus/xtypes@v0.2.1/internal/reflect/type.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package reflect implements run-time reflection, allowing a program to 6 // manipulate objects with arbitrary types. The typical use is to take a value 7 // with static type interface{} and extract its dynamic type information by 8 // calling TypeOf, which returns a Type. 9 // 10 // A call to ValueOf returns a Value representing the run-time data. 11 // Zero takes a Type and returns a Value representing a zero value 12 // for that type. 13 // 14 // See "The Laws of Reflection" for an introduction to reflection in Go: 15 // https://golang.org/doc/articles/laws_of_reflection.html 16 package reflect 17 18 import ( 19 "strconv" 20 "sync" 21 "unicode" 22 "unicode/utf8" 23 "unsafe" 24 25 "github.com/goplus/xtypes/internal/unsafeheader" 26 ) 27 28 // Type is the representation of a Go type. 29 // 30 // Not all methods apply to all kinds of types. Restrictions, 31 // if any, are noted in the documentation for each method. 32 // Use the Kind method to find out the kind of type before 33 // calling kind-specific methods. Calling a method 34 // inappropriate to the kind of type causes a run-time panic. 35 // 36 // Type values are comparable, such as with the == operator, 37 // so they can be used as map keys. 38 // Two Type values are equal if they represent identical types. 39 type Type interface { 40 // Methods applicable to all types. 41 42 // Align returns the alignment in bytes of a value of 43 // this type when allocated in memory. 44 Align() int 45 46 // FieldAlign returns the alignment in bytes of a value of 47 // this type when used as a field in a struct. 48 FieldAlign() int 49 50 // Method returns the i'th method in the type's method set. 51 // It panics if i is not in the range [0, NumMethod()). 52 // 53 // For a non-interface type T or *T, the returned Method's Type and Func 54 // fields describe a function whose first argument is the receiver, 55 // and only exported methods are accessible. 56 // 57 // For an interface type, the returned Method's Type field gives the 58 // method signature, without a receiver, and the Func field is nil. 59 // 60 // Methods are sorted in lexicographic order. 61 Method(int) Method 62 63 // MethodByName returns the method with that name in the type's 64 // method set and a boolean indicating if the method was found. 65 // 66 // For a non-interface type T or *T, the returned Method's Type and Func 67 // fields describe a function whose first argument is the receiver. 68 // 69 // For an interface type, the returned Method's Type field gives the 70 // method signature, without a receiver, and the Func field is nil. 71 MethodByName(string) (Method, bool) 72 73 // NumMethod returns the number of methods accessible using Method. 74 // 75 // Note that NumMethod counts unexported methods only for interface types. 76 NumMethod() int 77 78 // Name returns the type's name within its package for a defined type. 79 // For other (non-defined) types it returns the empty string. 80 Name() string 81 82 // PkgPath returns a defined type's package path, that is, the import path 83 // that uniquely identifies the package, such as "encoding/base64". 84 // If the type was predeclared (string, error) or not defined (*T, struct{}, 85 // []int, or A where A is an alias for a non-defined type), the package path 86 // will be the empty string. 
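	//
	// An illustrative sketch (not part of the original source), treating this
	// package as a drop-in for the standard reflect API:
	//
	//	type Celsius float64
	//	reflect.TypeOf(Celsius(0)).Name()    // "Celsius"
	//	reflect.TypeOf(Celsius(0)).PkgPath() // import path of the defining package
	//	reflect.TypeOf([]int(nil)).Name()    // "" (non-defined type)
	//	reflect.TypeOf([]int(nil)).PkgPath() // ""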
87 PkgPath() string 88 89 // Size returns the number of bytes needed to store 90 // a value of the given type; it is analogous to unsafe.Sizeof. 91 Size() uintptr 92 93 // String returns a string representation of the type. 94 // The string representation may use shortened package names 95 // (e.g., base64 instead of "encoding/base64") and is not 96 // guaranteed to be unique among types. To test for type identity, 97 // compare the Types directly. 98 String() string 99 100 // Kind returns the specific kind of this type. 101 Kind() Kind 102 103 // Implements reports whether the type implements the interface type u. 104 Implements(u Type) bool 105 106 // AssignableTo reports whether a value of the type is assignable to type u. 107 AssignableTo(u Type) bool 108 109 // ConvertibleTo reports whether a value of the type is convertible to type u. 110 ConvertibleTo(u Type) bool 111 112 // Comparable reports whether values of this type are comparable. 113 Comparable() bool 114 115 // Methods applicable only to some types, depending on Kind. 116 // The methods allowed for each kind are: 117 // 118 // Int*, Uint*, Float*, Complex*: Bits 119 // Array: Elem, Len 120 // Chan: ChanDir, Elem 121 // Func: In, NumIn, Out, NumOut, IsVariadic. 122 // Map: Key, Elem 123 // Ptr: Elem 124 // Slice: Elem 125 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 126 127 // Bits returns the size of the type in bits. 128 // It panics if the type's Kind is not one of the 129 // sized or unsized Int, Uint, Float, or Complex kinds. 130 Bits() int 131 132 // ChanDir returns a channel type's direction. 133 // It panics if the type's Kind is not Chan. 134 ChanDir() ChanDir 135 136 // IsVariadic reports whether a function type's final input parameter 137 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 138 // implicit actual type []T. 139 // 140 // For concreteness, if t represents func(x int, y ... float64), then 141 // 142 // t.NumIn() == 2 143 // t.In(0) is the reflect.Type for "int" 144 // t.In(1) is the reflect.Type for "[]float64" 145 // t.IsVariadic() == true 146 // 147 // IsVariadic panics if the type's Kind is not Func. 148 IsVariadic() bool 149 150 // Elem returns a type's element type. 151 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice. 152 Elem() Type 153 154 // Field returns a struct type's i'th field. 155 // It panics if the type's Kind is not Struct. 156 // It panics if i is not in the range [0, NumField()). 157 Field(i int) StructField 158 159 // FieldByIndex returns the nested field corresponding 160 // to the index sequence. It is equivalent to calling Field 161 // successively for each index i. 162 // It panics if the type's Kind is not Struct. 163 FieldByIndex(index []int) StructField 164 165 // FieldByName returns the struct field with the given name 166 // and a boolean indicating if the field was found. 167 FieldByName(name string) (StructField, bool) 168 169 // FieldByNameFunc returns the struct field with a name 170 // that satisfies the match function and a boolean indicating if 171 // the field was found. 172 // 173 // FieldByNameFunc considers the fields in the struct itself 174 // and then the fields in any embedded structs, in breadth first order, 175 // stopping at the shallowest nesting depth containing one or more 176 // fields satisfying the match function. If multiple fields at that depth 177 // satisfy the match function, they cancel each other 178 // and FieldByNameFunc returns no match. 
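	//
	// An illustrative sketch (assumed usage, not from the original source):
	// two embedded structs that both declare a field X cancel each other, so
	// a case-insensitive match for "x" finds nothing:
	//
	//	type A struct{ X int }
	//	type B struct{ X int }
	//	type S struct{ A; B }
	//	_, ok := reflect.TypeOf(S{}).FieldByNameFunc(func(n string) bool {
	//		return strings.EqualFold(n, "x")
	//	})
	//	// ok == false: both X fields sit at the same embedding depth.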
179 // This behavior mirrors Go's handling of name lookup in 180 // structs containing embedded fields. 181 FieldByNameFunc(match func(string) bool) (StructField, bool) 182 183 // In returns the type of a function type's i'th input parameter. 184 // It panics if the type's Kind is not Func. 185 // It panics if i is not in the range [0, NumIn()). 186 In(i int) Type 187 188 // Key returns a map type's key type. 189 // It panics if the type's Kind is not Map. 190 Key() Type 191 192 // Len returns an array type's length. 193 // It panics if the type's Kind is not Array. 194 Len() int 195 196 // NumField returns a struct type's field count. 197 // It panics if the type's Kind is not Struct. 198 NumField() int 199 200 // NumIn returns a function type's input parameter count. 201 // It panics if the type's Kind is not Func. 202 NumIn() int 203 204 // NumOut returns a function type's output parameter count. 205 // It panics if the type's Kind is not Func. 206 NumOut() int 207 208 // Out returns the type of a function type's i'th output parameter. 209 // It panics if the type's Kind is not Func. 210 // It panics if i is not in the range [0, NumOut()). 211 Out(i int) Type 212 213 common() *rtype 214 uncommon() *uncommonType 215 } 216 217 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 218 // if the names are equal, even if they are unexported names originating 219 // in different packages. The practical effect of this is that the result of 220 // t.FieldByName("x") is not well defined if the struct type t contains 221 // multiple fields named x (embedded from different packages). 222 // FieldByName may return one of the fields named x or may report that there are none. 223 // See https://golang.org/issue/4876 for more details. 224 225 /* 226 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). 227 * A few are known to ../runtime/type.go to convey to debuggers. 228 * They are also known to ../runtime/type.go. 229 */ 230 231 // A Kind represents the specific kind of type that a Type represents. 232 // The zero Kind is not a valid kind. 233 type Kind uint 234 235 const ( 236 Invalid Kind = iota 237 Bool 238 Int 239 Int8 240 Int16 241 Int32 242 Int64 243 Uint 244 Uint8 245 Uint16 246 Uint32 247 Uint64 248 Uintptr 249 Float32 250 Float64 251 Complex64 252 Complex128 253 Array 254 Chan 255 Func 256 Interface 257 Map 258 Ptr 259 Slice 260 String 261 Struct 262 UnsafePointer 263 ) 264 265 // tflag is used by an rtype to signal what extra type information is 266 // available in the memory directly following the rtype value. 267 // 268 // tflag values must be kept in sync with copies in: 269 // cmd/compile/internal/gc/reflect.go 270 // cmd/link/internal/ld/decodesym.go 271 // runtime/type.go 272 type tflag uint8 273 274 const ( 275 // tflagUncommon means that there is a pointer, *uncommonType, 276 // just beyond the outer type structure. 277 // 278 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 279 // then t has uncommonType data and it can be accessed as: 280 // 281 // type tUncommon struct { 282 // structType 283 // u uncommonType 284 // } 285 // u := &(*tUncommon)(unsafe.Pointer(t)).u 286 tflagUncommon tflag = 1 << 0 287 288 // tflagExtraStar means the name in the str field has an 289 // extraneous '*' prefix. This is because for most types T in 290 // a program, the type *T also exists and reusing the str data 291 // saves binary size. 
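	//
	// Illustratively, (*rtype).String further down undoes this sharing:
	//
	//	s := t.nameOff(t.str).name() // e.g. "*pkg.T"
	//	if t.tflag&tflagExtraStar != 0 {
	//		s = s[1:] // "pkg.T"
	//	}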
292 tflagExtraStar tflag = 1 << 1 293 294 // tflagNamed means the type has a name. 295 tflagNamed tflag = 1 << 2 296 297 // tflagRegularMemory means that equal and hash functions can treat 298 // this type as a single region of t.size bytes. 299 tflagRegularMemory tflag = 1 << 3 300 ) 301 302 // rtype is the common implementation of most values. 303 // It is embedded in other struct types. 304 // 305 // rtype must be kept in sync with ../runtime/type.go:/^type._type. 306 type rtype struct { 307 size uintptr 308 ptrdata uintptr // number of bytes in the type that can contain pointers 309 hash uint32 // hash of type; avoids computation in hash tables 310 tflag tflag // extra type information flags 311 align uint8 // alignment of variable with this type 312 fieldAlign uint8 // alignment of struct field with this type 313 kind uint8 // enumeration for C 314 // function for comparing objects of this type 315 // (ptr to object A, ptr to object B) -> ==? 316 equal func(unsafe.Pointer, unsafe.Pointer) bool 317 gcdata *byte // garbage collection data 318 str nameOff // string form 319 ptrToThis typeOff // type for pointer to this type, may be zero 320 } 321 322 // Method on non-interface type 323 type method struct { 324 name nameOff // name of method 325 mtyp typeOff // method type (without receiver) 326 ifn textOff // fn used in interface call (one-word receiver) 327 tfn textOff // fn used for normal method call 328 } 329 330 // uncommonType is present only for defined types or types with methods 331 // (if T is a defined type, the uncommonTypes for T and *T have methods). 332 // Using a pointer to this struct reduces the overall size required 333 // to describe a non-defined type with no methods. 334 type uncommonType struct { 335 pkgPath nameOff // import path; empty for built-in types like int, string 336 mcount uint16 // number of methods 337 xcount uint16 // number of exported methods 338 moff uint32 // offset from this uncommontype to [mcount]method 339 _ uint32 // unused 340 } 341 342 // ChanDir represents a channel type's direction. 343 type ChanDir int 344 345 const ( 346 RecvDir ChanDir = 1 << iota // <-chan 347 SendDir // chan<- 348 BothDir = RecvDir | SendDir // chan 349 ) 350 351 // arrayType represents a fixed array type. 352 type arrayType struct { 353 rtype 354 elem *rtype // array element type 355 slice *rtype // slice type 356 len uintptr 357 } 358 359 // chanType represents a channel type. 360 type chanType struct { 361 rtype 362 elem *rtype // channel element type 363 dir uintptr // channel direction (ChanDir) 364 } 365 366 // funcType represents a function type. 367 // 368 // A *rtype for each in and out parameter is stored in an array that 369 // directly follows the funcType (and possibly its uncommonType). So 370 // a function type with one method, one input, and one output is: 371 // 372 // struct { 373 // funcType 374 // uncommonType 375 // [2]*rtype // [0] is in, [1] is out 376 // } 377 type funcType struct { 378 rtype 379 inCount uint16 380 outCount uint16 // top bit is set if last input parameter is ... 381 } 382 383 // imethod represents a method on an interface type 384 type imethod struct { 385 name nameOff // name of method 386 typ typeOff // .(*FuncType) underneath 387 } 388 389 // interfaceType represents an interface type. 390 type interfaceType struct { 391 rtype 392 pkgPath name // import path 393 methods []imethod // sorted by hash 394 } 395 396 // mapType represents a map type. 
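// The flags field below is packed by MapOf later in this file; an
// illustrative summary of the bits it sets there:
//
//	1  - key stored indirectly (key size above maxKeySize)
//	2  - value stored indirectly (value size above maxValSize)
//	4  - key equality is reflexive (isReflexive)
//	8  - keys need updating on overwrite (needKeyUpdate)
//	16 - hashing a key may panic (hashMightPanic)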
397 type mapType struct { 398 rtype 399 key *rtype // map key type 400 elem *rtype // map element (value) type 401 bucket *rtype // internal bucket structure 402 // function for hashing keys (ptr to key, seed) -> hash 403 hasher func(unsafe.Pointer, uintptr) uintptr 404 keysize uint8 // size of key slot 405 valuesize uint8 // size of value slot 406 bucketsize uint16 // size of bucket 407 flags uint32 408 } 409 410 // ptrType represents a pointer type. 411 type ptrType struct { 412 rtype 413 elem *rtype // pointer element (pointed at) type 414 } 415 416 // sliceType represents a slice type. 417 type sliceType struct { 418 rtype 419 elem *rtype // slice element type 420 } 421 422 // Struct field 423 type structField struct { 424 name name // name is always non-empty 425 typ *rtype // type of field 426 offsetEmbed uintptr // byte offset of field<<1 | isEmbedded 427 } 428 429 func (f *structField) offset() uintptr { 430 return f.offsetEmbed >> 1 431 } 432 433 func (f *structField) embedded() bool { 434 return f.offsetEmbed&1 != 0 435 } 436 437 // structType represents a struct type. 438 type structType struct { 439 rtype 440 pkgPath name 441 fields []structField // sorted by offset 442 } 443 444 // name is an encoded type name with optional extra data. 445 // 446 // The first byte is a bit field containing: 447 // 448 // 1<<0 the name is exported 449 // 1<<1 tag data follows the name 450 // 1<<2 pkgPath nameOff follows the name and tag 451 // 452 // The next two bytes are the data length: 453 // 454 // l := uint16(data[1])<<8 | uint16(data[2]) 455 // 456 // Bytes [3:3+l] are the string data. 457 // 458 // If tag data follows then bytes 3+l and 3+l+1 are the tag length, 459 // with the data following. 460 // 461 // If the import path follows, then 4 bytes at the end of 462 // the data form a nameOff. The import path is only set for concrete 463 // methods that are defined in a different package than their type. 464 // 465 // If a name starts with "*", then the exported bit represents 466 // whether the pointed to type is exported. 
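//
// An illustrative decode of the header, following the layout above (b is the
// raw byte slice behind a name):
//
//	exported := b[0]&(1<<0) != 0
//	hasTag := b[0]&(1<<1) != 0
//	nameLen := int(b[1])<<8 | int(b[2])
//	str := string(b[3 : 3+nameLen])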
467 type name struct { 468 bytes *byte 469 } 470 471 func (n name) data(off int, whySafe string) *byte { 472 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe)) 473 } 474 475 func (n name) isExported() bool { 476 return (*n.bytes)&(1<<0) != 0 477 } 478 479 func (n name) nameLen() int { 480 return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field"))) 481 } 482 483 func (n name) tagLen() int { 484 if *n.data(0, "name flag field")&(1<<1) == 0 { 485 return 0 486 } 487 off := 3 + n.nameLen() 488 return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field"))) 489 } 490 491 func (n name) name() (s string) { 492 if n.bytes == nil { 493 return 494 } 495 b := (*[4]byte)(unsafe.Pointer(n.bytes)) 496 497 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 498 hdr.Data = unsafe.Pointer(&b[3]) 499 hdr.Len = int(b[1])<<8 | int(b[2]) 500 return s 501 } 502 503 func (n name) tag() (s string) { 504 tl := n.tagLen() 505 if tl == 0 { 506 return "" 507 } 508 nl := n.nameLen() 509 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 510 hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string")) 511 hdr.Len = tl 512 return s 513 } 514 515 func (n name) pkgPath() string { 516 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 { 517 return "" 518 } 519 off := 3 + n.nameLen() 520 if tl := n.tagLen(); tl > 0 { 521 off += 2 + tl 522 } 523 var nameOff int32 524 // Note that this field may not be aligned in memory, 525 // so we cannot use a direct int32 assignment here. 526 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:]) 527 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 528 return pkgPathName.name() 529 } 530 531 func newName(n, tag string, exported bool) name { 532 if len(n) > 1<<16-1 { 533 panic("reflect.nameFrom: name too long: " + n) 534 } 535 if len(tag) > 1<<16-1 { 536 panic("reflect.nameFrom: tag too long: " + tag) 537 } 538 539 var bits byte 540 l := 1 + 2 + len(n) 541 if exported { 542 bits |= 1 << 0 543 } 544 if len(tag) > 0 { 545 l += 2 + len(tag) 546 bits |= 1 << 1 547 } 548 549 b := make([]byte, l) 550 b[0] = bits 551 b[1] = uint8(len(n) >> 8) 552 b[2] = uint8(len(n)) 553 copy(b[3:], n) 554 if len(tag) > 0 { 555 tb := b[3+len(n):] 556 tb[0] = uint8(len(tag) >> 8) 557 tb[1] = uint8(len(tag)) 558 copy(tb[2:], tag) 559 } 560 561 return name{bytes: &b[0]} 562 } 563 564 /* 565 * The compiler knows the exact layout of all the data structures above. 566 * The compiler does not know about the data structures and methods below. 567 */ 568 569 // Method represents a single method. 570 type Method struct { 571 // Name is the method name. 572 // PkgPath is the package path that qualifies a lower case (unexported) 573 // method name. It is empty for upper case (exported) method names. 574 // The combination of PkgPath and Name uniquely identifies a method 575 // in a method set. 576 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 577 Name string 578 PkgPath string 579 580 Type Type // method type 581 Func Value // func with receiver as first argument 582 Index int // index for Type.Method 583 } 584 585 const ( 586 kindDirectIface = 1 << 5 587 kindGCProg = 1 << 6 // Type.gc points to GC program 588 kindMask = (1 << 5) - 1 589 ) 590 591 // String returns the name of k. 
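// For example (illustrative), using the kindNames table that follows:
//
//	Map.String()      // "map"
//	Kind(99).String() // "kind99" (out-of-range values fall back to the numeric form)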
592 func (k Kind) String() string { 593 if int(k) < len(kindNames) { 594 return kindNames[k] 595 } 596 return "kind" + strconv.Itoa(int(k)) 597 } 598 599 var kindNames = []string{ 600 Invalid: "invalid", 601 Bool: "bool", 602 Int: "int", 603 Int8: "int8", 604 Int16: "int16", 605 Int32: "int32", 606 Int64: "int64", 607 Uint: "uint", 608 Uint8: "uint8", 609 Uint16: "uint16", 610 Uint32: "uint32", 611 Uint64: "uint64", 612 Uintptr: "uintptr", 613 Float32: "float32", 614 Float64: "float64", 615 Complex64: "complex64", 616 Complex128: "complex128", 617 Array: "array", 618 Chan: "chan", 619 Func: "func", 620 Interface: "interface", 621 Map: "map", 622 Ptr: "ptr", 623 Slice: "slice", 624 String: "string", 625 Struct: "struct", 626 UnsafePointer: "unsafe.Pointer", 627 } 628 629 func (t *uncommonType) methods() []method { 630 if t.mcount == 0 { 631 return nil 632 } 633 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount] 634 } 635 636 func (t *uncommonType) exportedMethods() []method { 637 return t.methods() // NOTE: returns all methods, not just the exported ones; the xcount-based filtering below is unreachable (apparently a deliberate change in this fork so unexported methods are visible through Method/NumMethod). 638 if t.xcount == 0 { 639 return nil 640 } 641 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount] 642 } 643 644 // resolveNameOff resolves a name offset from a base pointer. 645 // The (*rtype).nameOff method is a convenience wrapper for this function. 646 // Implemented in the runtime package. 647 //go:linkname resolveNameOff reflect.resolveNameOff 648 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 649 650 // resolveTypeOff resolves an *rtype offset from a base type. 651 // The (*rtype).typeOff method is a convenience wrapper for this function. 652 // Implemented in the runtime package. 653 //go:linkname resolveTypeOff reflect.resolveTypeOff 654 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 655 656 // resolveTextOff resolves a function pointer offset from a base type. 657 // The (*rtype).textOff method is a convenience wrapper for this function. 658 // Implemented in the runtime package. 659 //go:linkname resolveTextOff reflect.resolveTextOff 660 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 661 662 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 663 // It returns a new ID that can be used as a typeOff or textOff, and will 664 // be resolved correctly. Implemented in the runtime package. 665 //go:linkname addReflectOff reflect.addReflectOff 666 func addReflectOff(ptr unsafe.Pointer) int32 667 668 // resolveReflectName adds a name to the reflection lookup map in the runtime. 669 // It returns a new nameOff that can be used to refer to the pointer. 670 func resolveReflectName(n name) nameOff { 671 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 672 } 673 674 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 675 // It returns a new typeOff that can be used to refer to the pointer. 676 func resolveReflectType(t *rtype) typeOff { 677 return typeOff(addReflectOff(unsafe.Pointer(t))) 678 } 679 680 // resolveReflectText adds a function pointer to the reflection lookup map in 681 // the runtime. It returns a new textOff that can be used to refer to the 682 // pointer.
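// Illustratively (a sketch, not from the original source), the
// runtime-constructed types later in this file (PtrTo, ChanOf, MapOf, FuncOf)
// rely on the name registration above: they store a freshly registered name
// into str, and (*rtype).String resolves it back through nameOff:
//
//	pp.str = resolveReflectName(newName(s, "", false)) // register the name
//	str := pp.nameOff(pp.str).name()                    // resolves back to s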
683 func resolveReflectText(ptr unsafe.Pointer) textOff { 684 return textOff(addReflectOff(ptr)) 685 } 686 687 type nameOff int32 // offset to a name 688 type typeOff int32 // offset to an *rtype 689 type textOff int32 // offset from top of text section 690 691 func (t *rtype) nameOff(off nameOff) name { 692 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 693 } 694 695 func (t *rtype) typeOff(off typeOff) *rtype { 696 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 697 } 698 699 func (t *rtype) textOff(off textOff) unsafe.Pointer { 700 return resolveTextOff(unsafe.Pointer(t), int32(off)) 701 } 702 703 func (t *rtype) uncommon() *uncommonType { 704 if t.tflag&tflagUncommon == 0 { 705 return nil 706 } 707 switch t.Kind() { 708 case Struct: 709 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 710 case Ptr: 711 type u struct { 712 ptrType 713 u uncommonType 714 } 715 return &(*u)(unsafe.Pointer(t)).u 716 case Func: 717 type u struct { 718 funcType 719 u uncommonType 720 } 721 return &(*u)(unsafe.Pointer(t)).u 722 case Slice: 723 type u struct { 724 sliceType 725 u uncommonType 726 } 727 return &(*u)(unsafe.Pointer(t)).u 728 case Array: 729 type u struct { 730 arrayType 731 u uncommonType 732 } 733 return &(*u)(unsafe.Pointer(t)).u 734 case Chan: 735 type u struct { 736 chanType 737 u uncommonType 738 } 739 return &(*u)(unsafe.Pointer(t)).u 740 case Map: 741 type u struct { 742 mapType 743 u uncommonType 744 } 745 return &(*u)(unsafe.Pointer(t)).u 746 case Interface: 747 type u struct { 748 interfaceType 749 u uncommonType 750 } 751 return &(*u)(unsafe.Pointer(t)).u 752 default: 753 type u struct { 754 rtype 755 u uncommonType 756 } 757 return &(*u)(unsafe.Pointer(t)).u 758 } 759 } 760 761 func (t *rtype) String() string { 762 s := t.nameOff(t.str).name() 763 if t.tflag&tflagExtraStar != 0 { 764 return s[1:] 765 } 766 return s 767 } 768 769 func (t *rtype) Size() uintptr { return t.size } 770 771 func (t *rtype) Bits() int { 772 if t == nil { 773 panic("reflect: Bits of nil Type") 774 } 775 k := t.Kind() 776 if k < Int || k > Complex128 { 777 panic("reflect: Bits of non-arithmetic Type " + t.String()) 778 } 779 return int(t.size) * 8 780 } 781 782 func (t *rtype) Align() int { return int(t.align) } 783 784 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 785 786 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 787 788 func (t *rtype) pointers() bool { return t.ptrdata != 0 } 789 790 func (t *rtype) common() *rtype { return t } 791 792 func (t *rtype) exportedMethods() []method { 793 ut := t.uncommon() 794 if ut == nil { 795 return nil 796 } 797 return ut.exportedMethods() 798 } 799 800 func (t *rtype) NumMethod() int { 801 if t.Kind() == Interface { 802 tt := (*interfaceType)(unsafe.Pointer(t)) 803 return tt.NumMethod() 804 } 805 return len(t.exportedMethods()) 806 } 807 808 func (t *rtype) Method(i int) (m Method) { 809 if t.Kind() == Interface { 810 tt := (*interfaceType)(unsafe.Pointer(t)) 811 return tt.Method(i) 812 } 813 methods := t.exportedMethods() 814 if i < 0 || i >= len(methods) { 815 panic("reflect: Method index out of range") 816 } 817 p := methods[i] 818 pname := t.nameOff(p.name) 819 m.Name = pname.name() 820 fl := flag(Func) 821 mtyp := t.typeOff(p.mtyp) 822 ft := (*funcType)(unsafe.Pointer(mtyp)) 823 in := make([]Type, 0, 1+len(ft.in())) 824 in = append(in, t) 825 for _, arg := range ft.in() { 826 in = append(in, arg) 827 } 828 out := make([]Type, 0, len(ft.out())) 829 for _, ret := range ft.out() { 830 out = 
append(out, ret) 831 } 832 mt := FuncOf(in, out, ft.IsVariadic()) 833 m.Type = mt 834 tfn := t.textOff(p.tfn) 835 fn := unsafe.Pointer(&tfn) 836 m.Func = Value{mt.(*rtype), fn, fl} 837 838 m.Index = i 839 return m 840 } 841 842 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 843 if t.Kind() == Interface { 844 tt := (*interfaceType)(unsafe.Pointer(t)) 845 return tt.MethodByName(name) 846 } 847 ut := t.uncommon() 848 if ut == nil { 849 return Method{}, false 850 } 851 // TODO(mdempsky): Binary search. 852 for i, p := range ut.exportedMethods() { 853 if t.nameOff(p.name).name() == name { 854 return t.Method(i), true 855 } 856 } 857 return Method{}, false 858 } 859 860 func (t *rtype) PkgPath() string { 861 if t.tflag&tflagNamed == 0 { 862 return "" 863 } 864 ut := t.uncommon() 865 if ut == nil { 866 return "" 867 } 868 return t.nameOff(ut.pkgPath).name() 869 } 870 871 func (t *rtype) hasName() bool { 872 return t.tflag&tflagNamed != 0 873 } 874 875 func (t *rtype) Name() string { 876 if !t.hasName() { 877 return "" 878 } 879 s := t.String() 880 i := len(s) - 1 881 for i >= 0 && s[i] != '.' { 882 i-- 883 } 884 return s[i+1:] 885 } 886 887 func (t *rtype) ChanDir() ChanDir { 888 if t.Kind() != Chan { 889 panic("reflect: ChanDir of non-chan type " + t.String()) 890 } 891 tt := (*chanType)(unsafe.Pointer(t)) 892 return ChanDir(tt.dir) 893 } 894 895 func (t *rtype) IsVariadic() bool { 896 if t.Kind() != Func { 897 panic("reflect: IsVariadic of non-func type " + t.String()) 898 } 899 tt := (*funcType)(unsafe.Pointer(t)) 900 return tt.outCount&(1<<15) != 0 901 } 902 903 func (t *rtype) Elem() Type { 904 switch t.Kind() { 905 case Array: 906 tt := (*arrayType)(unsafe.Pointer(t)) 907 return toType(tt.elem) 908 case Chan: 909 tt := (*chanType)(unsafe.Pointer(t)) 910 return toType(tt.elem) 911 case Map: 912 tt := (*mapType)(unsafe.Pointer(t)) 913 return toType(tt.elem) 914 case Ptr: 915 tt := (*ptrType)(unsafe.Pointer(t)) 916 return toType(tt.elem) 917 case Slice: 918 tt := (*sliceType)(unsafe.Pointer(t)) 919 return toType(tt.elem) 920 } 921 panic("reflect: Elem of invalid type " + t.String()) 922 } 923 924 func (t *rtype) Field(i int) StructField { 925 if t.Kind() != Struct { 926 panic("reflect: Field of non-struct type " + t.String()) 927 } 928 tt := (*structType)(unsafe.Pointer(t)) 929 return tt.Field(i) 930 } 931 932 func (t *rtype) FieldByIndex(index []int) StructField { 933 if t.Kind() != Struct { 934 panic("reflect: FieldByIndex of non-struct type " + t.String()) 935 } 936 tt := (*structType)(unsafe.Pointer(t)) 937 return tt.FieldByIndex(index) 938 } 939 940 func (t *rtype) FieldByName(name string) (StructField, bool) { 941 if t.Kind() != Struct { 942 panic("reflect: FieldByName of non-struct type " + t.String()) 943 } 944 tt := (*structType)(unsafe.Pointer(t)) 945 return tt.FieldByName(name) 946 } 947 948 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 949 if t.Kind() != Struct { 950 panic("reflect: FieldByNameFunc of non-struct type " + t.String()) 951 } 952 tt := (*structType)(unsafe.Pointer(t)) 953 return tt.FieldByNameFunc(match) 954 } 955 956 func (t *rtype) In(i int) Type { 957 if t.Kind() != Func { 958 panic("reflect: In of non-func type " + t.String()) 959 } 960 tt := (*funcType)(unsafe.Pointer(t)) 961 return toType(tt.in()[i]) 962 } 963 964 func (t *rtype) Key() Type { 965 if t.Kind() != Map { 966 panic("reflect: Key of non-map type " + t.String()) 967 } 968 tt := (*mapType)(unsafe.Pointer(t)) 969 return toType(tt.key) 970 } 971 972 func (t 
*rtype) Len() int { 973 if t.Kind() != Array { 974 panic("reflect: Len of non-array type " + t.String()) 975 } 976 tt := (*arrayType)(unsafe.Pointer(t)) 977 return int(tt.len) 978 } 979 980 func (t *rtype) NumField() int { 981 if t.Kind() != Struct { 982 panic("reflect: NumField of non-struct type " + t.String()) 983 } 984 tt := (*structType)(unsafe.Pointer(t)) 985 return len(tt.fields) 986 } 987 988 func (t *rtype) NumIn() int { 989 if t.Kind() != Func { 990 panic("reflect: NumIn of non-func type " + t.String()) 991 } 992 tt := (*funcType)(unsafe.Pointer(t)) 993 return int(tt.inCount) 994 } 995 996 func (t *rtype) NumOut() int { 997 if t.Kind() != Func { 998 panic("reflect: NumOut of non-func type " + t.String()) 999 } 1000 tt := (*funcType)(unsafe.Pointer(t)) 1001 return len(tt.out()) 1002 } 1003 1004 func (t *rtype) Out(i int) Type { 1005 if t.Kind() != Func { 1006 panic("reflect: Out of non-func type " + t.String()) 1007 } 1008 tt := (*funcType)(unsafe.Pointer(t)) 1009 return toType(tt.out()[i]) 1010 } 1011 1012 func (t *funcType) in() []*rtype { 1013 uadd := unsafe.Sizeof(*t) 1014 if t.tflag&tflagUncommon != 0 { 1015 uadd += unsafe.Sizeof(uncommonType{}) 1016 } 1017 if t.inCount == 0 { 1018 return nil 1019 } 1020 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount] 1021 } 1022 1023 func (t *funcType) out() []*rtype { 1024 uadd := unsafe.Sizeof(*t) 1025 if t.tflag&tflagUncommon != 0 { 1026 uadd += unsafe.Sizeof(uncommonType{}) 1027 } 1028 outCount := t.outCount & (1<<15 - 1) 1029 if outCount == 0 { 1030 return nil 1031 } 1032 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount] 1033 } 1034 1035 // add returns p+x. 1036 // 1037 // The whySafe string is ignored, so that the function still inlines 1038 // as efficiently as p+x, but all call sites should use the string to 1039 // record why the addition is safe, which is to say why the addition 1040 // does not cause x to advance to the very end of p's allocation 1041 // and therefore point incorrectly at the next block in memory. 1042 1043 //go:nocheckptr 1044 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { 1045 return unsafe.Pointer(uintptr(p) + x) 1046 } 1047 1048 func (d ChanDir) String() string { 1049 switch d { 1050 case SendDir: 1051 return "chan<-" 1052 case RecvDir: 1053 return "<-chan" 1054 case BothDir: 1055 return "chan" 1056 } 1057 return "ChanDir" + strconv.Itoa(int(d)) 1058 } 1059 1060 // Method returns the i'th method in the type's method set. 1061 func (t *interfaceType) Method(i int) (m Method) { 1062 if i < 0 || i >= len(t.methods) { 1063 return 1064 } 1065 p := &t.methods[i] 1066 pname := t.nameOff(p.name) 1067 m.Name = pname.name() 1068 if !pname.isExported() { 1069 m.PkgPath = pname.pkgPath() 1070 if m.PkgPath == "" { 1071 m.PkgPath = t.pkgPath.name() 1072 } 1073 } 1074 m.Type = toType(t.typeOff(p.typ)) 1075 m.Index = i 1076 return 1077 } 1078 1079 // NumMethod returns the number of interface methods in the type's method set. 1080 func (t *interfaceType) NumMethod() int { return len(t.methods) } 1081 1082 // MethodByName method with the given name in the type's method set. 
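// An illustrative walk over an interface's method set using the wrappers
// above (assumed usage, not from the original source):
//
//	it := reflect.TypeOf((*io.ReadWriter)(nil)).Elem()
//	for i := 0; i < it.NumMethod(); i++ {
//		m := it.Method(i) // m.Name is "Read", then "Write" (sorted order); m.Func is the zero Value
//	}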
1083 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { 1084 if t == nil { 1085 return 1086 } 1087 var p *imethod 1088 for i := range t.methods { 1089 p = &t.methods[i] 1090 if t.nameOff(p.name).name() == name { 1091 return t.Method(i), true 1092 } 1093 } 1094 return 1095 } 1096 1097 // A StructField describes a single field in a struct. 1098 type StructField struct { 1099 // Name is the field name. 1100 Name string 1101 // PkgPath is the package path that qualifies a lower case (unexported) 1102 // field name. It is empty for upper case (exported) field names. 1103 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 1104 PkgPath string 1105 1106 Type Type // field type 1107 Tag StructTag // field tag string 1108 Offset uintptr // offset within struct, in bytes 1109 Index []int // index sequence for Type.FieldByIndex 1110 Anonymous bool // is an embedded field 1111 } 1112 1113 // A StructTag is the tag string in a struct field. 1114 // 1115 // By convention, tag strings are a concatenation of 1116 // optionally space-separated key:"value" pairs. 1117 // Each key is a non-empty string consisting of non-control 1118 // characters other than space (U+0020 ' '), quote (U+0022 '"'), 1119 // and colon (U+003A ':'). Each value is quoted using U+0022 '"' 1120 // characters and Go string literal syntax. 1121 type StructTag string 1122 1123 // Get returns the value associated with key in the tag string. 1124 // If there is no such key in the tag, Get returns the empty string. 1125 // If the tag does not have the conventional format, the value 1126 // returned by Get is unspecified. To determine whether a tag is 1127 // explicitly set to the empty string, use Lookup. 1128 func (tag StructTag) Get(key string) string { 1129 v, _ := tag.Lookup(key) 1130 return v 1131 } 1132 1133 // Lookup returns the value associated with key in the tag string. 1134 // If the key is present in the tag the value (which may be empty) 1135 // is returned. Otherwise the returned value will be the empty string. 1136 // The ok return value reports whether the value was explicitly set in 1137 // the tag string. If the tag does not have the conventional format, 1138 // the value returned by Lookup is unspecified. 1139 func (tag StructTag) Lookup(key string) (value string, ok bool) { 1140 // When modifying this code, also update the validateStructTag code 1141 // in cmd/vet/structtag.go. 1142 1143 for tag != "" { 1144 // Skip leading space. 1145 i := 0 1146 for i < len(tag) && tag[i] == ' ' { 1147 i++ 1148 } 1149 tag = tag[i:] 1150 if tag == "" { 1151 break 1152 } 1153 1154 // Scan to colon. A space, a quote or a control character is a syntax error. 1155 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just 1156 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters 1157 // as it is simpler to inspect the tag's bytes than the tag's runes. 1158 i = 0 1159 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { 1160 i++ 1161 } 1162 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { 1163 break 1164 } 1165 name := string(tag[:i]) 1166 tag = tag[i+1:] 1167 1168 // Scan quoted string to find value. 
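		// The value is a double-quoted Go string; a backslash escapes the
		// next byte here, and strconv.Unquote below undoes the quoting once
		// the requested key has matched.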
1169 i = 1 1170 for i < len(tag) && tag[i] != '"' { 1171 if tag[i] == '\\' { 1172 i++ 1173 } 1174 i++ 1175 } 1176 if i >= len(tag) { 1177 break 1178 } 1179 qvalue := string(tag[:i+1]) 1180 tag = tag[i+1:] 1181 1182 if key == name { 1183 value, err := strconv.Unquote(qvalue) 1184 if err != nil { 1185 break 1186 } 1187 return value, true 1188 } 1189 } 1190 return "", false 1191 } 1192 1193 // Field returns the i'th struct field. 1194 func (t *structType) Field(i int) (f StructField) { 1195 if i < 0 || i >= len(t.fields) { 1196 panic("reflect: Field index out of bounds") 1197 } 1198 p := &t.fields[i] 1199 f.Type = toType(p.typ) 1200 f.Name = p.name.name() 1201 f.Anonymous = p.embedded() 1202 if !p.name.isExported() { 1203 f.PkgPath = t.pkgPath.name() 1204 } 1205 if tag := p.name.tag(); tag != "" { 1206 f.Tag = StructTag(tag) 1207 } 1208 f.Offset = p.offset() 1209 1210 // NOTE(rsc): This is the only allocation in the interface 1211 // presented by a reflect.Type. It would be nice to avoid, 1212 // at least in the common cases, but we need to make sure 1213 // that misbehaving clients of reflect cannot affect other 1214 // uses of reflect. One possibility is CL 5371098, but we 1215 // postponed that ugliness until there is a demonstrated 1216 // need for the performance. This is issue 2320. 1217 f.Index = []int{i} 1218 return 1219 } 1220 1221 // TODO(gri): Should there be an error/bool indicator if the index 1222 // is wrong for FieldByIndex? 1223 1224 // FieldByIndex returns the nested field corresponding to index. 1225 func (t *structType) FieldByIndex(index []int) (f StructField) { 1226 f.Type = toType(&t.rtype) 1227 for i, x := range index { 1228 if i > 0 { 1229 ft := f.Type 1230 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct { 1231 ft = ft.Elem() 1232 } 1233 f.Type = ft 1234 } 1235 f = f.Type.Field(x) 1236 } 1237 return 1238 } 1239 1240 // A fieldScan represents an item on the fieldByNameFunc scan work list. 1241 type fieldScan struct { 1242 typ *structType 1243 index []int 1244 } 1245 1246 // FieldByNameFunc returns the struct field with a name that satisfies the 1247 // match function and a boolean to indicate if the field was found. 1248 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) { 1249 // This uses the same condition that the Go language does: there must be a unique instance 1250 // of the match at a given depth level. If there are multiple instances of a match at the 1251 // same depth, they annihilate each other and inhibit any possible match at a lower level. 1252 // The algorithm is breadth first search, one depth level at a time. 1253 1254 // The current and next slices are work queues: 1255 // current lists the fields to visit on this depth level, 1256 // and next lists the fields on the next lower level. 1257 current := []fieldScan{} 1258 next := []fieldScan{{typ: t}} 1259 1260 // nextCount records the number of times an embedded type has been 1261 // encountered and considered for queueing in the 'next' slice. 1262 // We only queue the first one, but we increment the count on each. 1263 // If a struct type T can be reached more than once at a given depth level, 1264 // then it annihilates itself and need not be considered at all when we 1265 // process that next depth level. 1266 var nextCount map[*structType]int 1267 1268 // visited records the structs that have been considered already. 1269 // Embedded pointer fields can create cycles in the graph of 1270 // reachable embedded types; visited avoids following those cycles. 
1271 // It also avoids duplicated effort: if we didn't find the field in an 1272 // embedded type T at level 2, we won't find it in one at level 4 either. 1273 visited := map[*structType]bool{} 1274 1275 for len(next) > 0 { 1276 current, next = next, current[:0] 1277 count := nextCount 1278 nextCount = nil 1279 1280 // Process all the fields at this depth, now listed in 'current'. 1281 // The loop queues embedded fields found in 'next', for processing during the next 1282 // iteration. The multiplicity of the 'current' field counts is recorded 1283 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. 1284 for _, scan := range current { 1285 t := scan.typ 1286 if visited[t] { 1287 // We've looked through this type before, at a higher level. 1288 // That higher level would shadow the lower level we're now at, 1289 // so this one can't be useful to us. Ignore it. 1290 continue 1291 } 1292 visited[t] = true 1293 for i := range t.fields { 1294 f := &t.fields[i] 1295 // Find name and (for embedded field) type for field f. 1296 fname := f.name.name() 1297 var ntyp *rtype 1298 if f.embedded() { 1299 // Embedded field of type T or *T. 1300 ntyp = f.typ 1301 if ntyp.Kind() == Ptr { 1302 ntyp = ntyp.Elem().common() 1303 } 1304 } 1305 1306 // Does it match? 1307 if match(fname) { 1308 // Potential match 1309 if count[t] > 1 || ok { 1310 // Name appeared multiple times at this level: annihilate. 1311 return StructField{}, false 1312 } 1313 result = t.Field(i) 1314 result.Index = nil 1315 result.Index = append(result.Index, scan.index...) 1316 result.Index = append(result.Index, i) 1317 ok = true 1318 continue 1319 } 1320 1321 // Queue embedded struct fields for processing with next level, 1322 // but only if we haven't seen a match yet at this level and only 1323 // if the embedded types haven't already been queued. 1324 if ok || ntyp == nil || ntyp.Kind() != Struct { 1325 continue 1326 } 1327 styp := (*structType)(unsafe.Pointer(ntyp)) 1328 if nextCount[styp] > 0 { 1329 nextCount[styp] = 2 // exact multiple doesn't matter 1330 continue 1331 } 1332 if nextCount == nil { 1333 nextCount = map[*structType]int{} 1334 } 1335 nextCount[styp] = 1 1336 if count[t] > 1 { 1337 nextCount[styp] = 2 // exact multiple doesn't matter 1338 } 1339 var index []int 1340 index = append(index, scan.index...) 1341 index = append(index, i) 1342 next = append(next, fieldScan{styp, index}) 1343 } 1344 } 1345 if ok { 1346 break 1347 } 1348 } 1349 return 1350 } 1351 1352 // FieldByName returns the struct field with the given name 1353 // and a boolean to indicate if the field was found. 1354 func (t *structType) FieldByName(name string) (f StructField, present bool) { 1355 // Quick check for top-level name, or struct without embedded fields. 1356 hasEmbeds := false 1357 if name != "" { 1358 for i := range t.fields { 1359 tf := &t.fields[i] 1360 if tf.name.name() == name { 1361 return t.Field(i), true 1362 } 1363 if tf.embedded() { 1364 hasEmbeds = true 1365 } 1366 } 1367 } 1368 if !hasEmbeds { 1369 return 1370 } 1371 return t.FieldByNameFunc(func(s string) bool { return s == name }) 1372 } 1373 1374 // TypeOf returns the reflection Type that represents the dynamic type of i. 1375 // If i is a nil interface value, TypeOf returns nil. 1376 func TypeOf(i interface{}) Type { 1377 eface := *(*emptyInterface)(unsafe.Pointer(&i)) 1378 return toType(eface.typ) 1379 } 1380 1381 // ptrMap is the cache for PtrTo. 
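// An illustrative use of PtrTo below (assumed usage, not from the original
// source):
//
//	t := reflect.TypeOf(0) // int
//	pt := reflect.PtrTo(t) // *int: from ptrToThis, this cache, or a new ptrType
//	_ = pt.Elem() == t     // true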
1382 var ptrMap sync.Map // map[*rtype]*ptrType 1383 1384 // PtrTo returns the pointer type with element t. 1385 // For example, if t represents type Foo, PtrTo(t) represents *Foo. 1386 func PtrTo(t Type) Type { 1387 return t.(*rtype).ptrTo() 1388 } 1389 1390 func (t *rtype) ptrTo() *rtype { 1391 if t.ptrToThis != 0 { 1392 return t.typeOff(t.ptrToThis) 1393 } 1394 1395 // Check the cache. 1396 if pi, ok := ptrMap.Load(t); ok { 1397 return &pi.(*ptrType).rtype 1398 } 1399 1400 // Look in known types. 1401 s := "*" + t.String() 1402 for _, tt := range typesByString(s) { 1403 p := (*ptrType)(unsafe.Pointer(tt)) 1404 if p.elem != t { 1405 continue 1406 } 1407 pi, _ := ptrMap.LoadOrStore(t, p) 1408 return &pi.(*ptrType).rtype 1409 } 1410 1411 // Create a new ptrType starting with the description 1412 // of an *unsafe.Pointer. 1413 var iptr interface{} = (*unsafe.Pointer)(nil) 1414 prototype := *(**ptrType)(unsafe.Pointer(&iptr)) 1415 pp := *prototype 1416 1417 pp.str = resolveReflectName(newName(s, "", false)) 1418 pp.ptrToThis = 0 1419 1420 // For the type structures linked into the binary, the 1421 // compiler provides a good hash of the string. 1422 // Create a good hash for the new string by using 1423 // the FNV-1 hash's mixing function to combine the 1424 // old hash and the new "*". 1425 pp.hash = fnv1(t.hash, '*') 1426 1427 pp.elem = t 1428 1429 pi, _ := ptrMap.LoadOrStore(t, &pp) 1430 return &pi.(*ptrType).rtype 1431 } 1432 1433 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. 1434 func fnv1(x uint32, list ...byte) uint32 { 1435 for _, b := range list { 1436 x = x*16777619 ^ uint32(b) 1437 } 1438 return x 1439 } 1440 1441 func (t *rtype) Implements(u Type) bool { 1442 if u == nil { 1443 panic("reflect: nil type passed to Type.Implements") 1444 } 1445 if u.Kind() != Interface { 1446 panic("reflect: non-interface type passed to Type.Implements") 1447 } 1448 return implements(u.(*rtype), t) 1449 } 1450 1451 func (t *rtype) AssignableTo(u Type) bool { 1452 if u == nil { 1453 panic("reflect: nil type passed to Type.AssignableTo") 1454 } 1455 uu := u.(*rtype) 1456 return directlyAssignable(uu, t) || implements(uu, t) 1457 } 1458 1459 func (t *rtype) ConvertibleTo(u Type) bool { 1460 if u == nil { 1461 panic("reflect: nil type passed to Type.ConvertibleTo") 1462 } 1463 uu := u.(*rtype) 1464 return convertOp(uu, t) != nil 1465 } 1466 1467 func (t *rtype) Comparable() bool { 1468 return t.equal != nil 1469 } 1470 1471 // implements reports whether the type V implements the interface type T. 1472 func implements(T, V *rtype) bool { 1473 if T.Kind() != Interface { 1474 return false 1475 } 1476 t := (*interfaceType)(unsafe.Pointer(T)) 1477 if len(t.methods) == 0 { 1478 return true 1479 } 1480 1481 // The same algorithm applies in both cases, but the 1482 // method tables for an interface type and a concrete type 1483 // are different, so the code is duplicated. 1484 // In both cases the algorithm is a linear scan over the two 1485 // lists - T's methods and V's methods - simultaneously. 1486 // Since method tables are stored in a unique sorted order 1487 // (alphabetical, with no duplicate method names), the scan 1488 // through V's methods must hit a match for each of T's 1489 // methods along the way, or else V does not implement T. 1490 // This lets us run the scan in overall linear time instead of 1491 // the quadratic time a naive search would require. 1492 // See also ../runtime/iface.go. 
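	// Case 1: V is itself an interface type, so its candidate methods live in
	// an interfaceType method table rather than in uncommonType data.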
1493 if V.Kind() == Interface { 1494 v := (*interfaceType)(unsafe.Pointer(V)) 1495 i := 0 1496 for j := 0; j < len(v.methods); j++ { 1497 tm := &t.methods[i] 1498 tmName := t.nameOff(tm.name) 1499 vm := &v.methods[j] 1500 vmName := V.nameOff(vm.name) 1501 if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { 1502 if !tmName.isExported() { 1503 tmPkgPath := tmName.pkgPath() 1504 if tmPkgPath == "" { 1505 tmPkgPath = t.pkgPath.name() 1506 } 1507 vmPkgPath := vmName.pkgPath() 1508 if vmPkgPath == "" { 1509 vmPkgPath = v.pkgPath.name() 1510 } 1511 if tmPkgPath != vmPkgPath { 1512 continue 1513 } 1514 } 1515 if i++; i >= len(t.methods) { 1516 return true 1517 } 1518 } 1519 } 1520 return false 1521 } 1522 1523 v := V.uncommon() 1524 if v == nil { 1525 return false 1526 } 1527 i := 0 1528 vmethods := v.methods() 1529 for j := 0; j < int(v.mcount); j++ { 1530 tm := &t.methods[i] 1531 tmName := t.nameOff(tm.name) 1532 vm := vmethods[j] 1533 vmName := V.nameOff(vm.name) 1534 if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) { 1535 if !tmName.isExported() { 1536 tmPkgPath := tmName.pkgPath() 1537 if tmPkgPath == "" { 1538 tmPkgPath = t.pkgPath.name() 1539 } 1540 vmPkgPath := vmName.pkgPath() 1541 if vmPkgPath == "" { 1542 vmPkgPath = V.nameOff(v.pkgPath).name() 1543 } 1544 if tmPkgPath != vmPkgPath { 1545 continue 1546 } 1547 } 1548 if i++; i >= len(t.methods) { 1549 return true 1550 } 1551 } 1552 } 1553 return false 1554 } 1555 1556 // specialChannelAssignability reports whether a value x of channel type V 1557 // can be directly assigned (using memmove) to another channel type T. 1558 // https://golang.org/doc/go_spec.html#Assignability 1559 // T and V must be both of Chan kind. 1560 func specialChannelAssignability(T, V *rtype) bool { 1561 // Special case: 1562 // x is a bidirectional channel value, T is a channel type, 1563 // x's type V and T have identical element types, 1564 // and at least one of V or T is not a defined type. 1565 return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true) 1566 } 1567 1568 // directlyAssignable reports whether a value x of type V can be directly 1569 // assigned (using memmove) to a value of type T. 1570 // https://golang.org/doc/go_spec.html#Assignability 1571 // Ignoring the interface rules (implemented elsewhere) 1572 // and the ideal constant rules (no ideal constants at run time). 1573 func directlyAssignable(T, V *rtype) bool { 1574 // x's type V is identical to T? 1575 if T == V { 1576 return true 1577 } 1578 1579 // Otherwise at least one of T and V must not be defined 1580 // and they must have the same kind. 1581 if T.hasName() && V.hasName() || T.Kind() != V.Kind() { 1582 return false 1583 } 1584 1585 if T.Kind() == Chan && specialChannelAssignability(T, V) { 1586 return true 1587 } 1588 1589 // x's type T and V must have identical underlying types. 
1590 return haveIdenticalUnderlyingType(T, V, true) 1591 } 1592 1593 func haveIdenticalType(T, V Type, cmpTags bool) bool { 1594 if cmpTags { 1595 return T == V 1596 } 1597 1598 if T.Name() != V.Name() || T.Kind() != V.Kind() { 1599 return false 1600 } 1601 1602 return haveIdenticalUnderlyingType(T.common(), V.common(), false) 1603 } 1604 1605 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool { 1606 if T == V { 1607 return true 1608 } 1609 1610 kind := T.Kind() 1611 if kind != V.Kind() { 1612 return false 1613 } 1614 1615 // Non-composite types of equal kind have same underlying type 1616 // (the predefined instance of the type). 1617 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer { 1618 return true 1619 } 1620 1621 // Composite types. 1622 switch kind { 1623 case Array: 1624 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1625 1626 case Chan: 1627 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1628 1629 case Func: 1630 t := (*funcType)(unsafe.Pointer(T)) 1631 v := (*funcType)(unsafe.Pointer(V)) 1632 if t.outCount != v.outCount || t.inCount != v.inCount { 1633 return false 1634 } 1635 for i := 0; i < t.NumIn(); i++ { 1636 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) { 1637 return false 1638 } 1639 } 1640 for i := 0; i < t.NumOut(); i++ { 1641 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) { 1642 return false 1643 } 1644 } 1645 return true 1646 1647 case Interface: 1648 t := (*interfaceType)(unsafe.Pointer(T)) 1649 v := (*interfaceType)(unsafe.Pointer(V)) 1650 if len(t.methods) == 0 && len(v.methods) == 0 { 1651 return true 1652 } 1653 // Might have the same methods but still 1654 // need a run time conversion. 1655 return false 1656 1657 case Map: 1658 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1659 1660 case Ptr, Slice: 1661 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1662 1663 case Struct: 1664 t := (*structType)(unsafe.Pointer(T)) 1665 v := (*structType)(unsafe.Pointer(V)) 1666 if len(t.fields) != len(v.fields) { 1667 return false 1668 } 1669 if t.pkgPath.name() != v.pkgPath.name() { 1670 return false 1671 } 1672 for i := range t.fields { 1673 tf := &t.fields[i] 1674 vf := &v.fields[i] 1675 if tf.name.name() != vf.name.name() { 1676 return false 1677 } 1678 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) { 1679 return false 1680 } 1681 if cmpTags && tf.name.tag() != vf.name.tag() { 1682 return false 1683 } 1684 if tf.offsetEmbed != vf.offsetEmbed { 1685 return false 1686 } 1687 } 1688 return true 1689 } 1690 1691 return false 1692 } 1693 1694 // typelinks is implemented in package runtime. 1695 // It returns a slice of the sections in each module, 1696 // and a slice of *rtype offsets in each module. 1697 // 1698 // The types in each module are sorted by string. That is, the first 1699 // two linked types of the first module are: 1700 // 1701 // d0 := sections[0] 1702 // t1 := (*rtype)(add(d0, offset[0][0])) 1703 // t2 := (*rtype)(add(d0, offset[0][1])) 1704 // 1705 // and 1706 // 1707 // t1.String() < t2.String() 1708 // 1709 // Note that strings are not unique identifiers for types: 1710 // there can be more than one with a given string. 1711 // Only types we might want to look up are included: 1712 // pointers, channels, maps, slices, and arrays. 
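// An illustrative use of the lookup pair declared below (assumed usage, not
// from the original source):
//
//	for _, tt := range typesByString("*pkg.T") {
//		// tt prints as "*pkg.T", but distinct packages named pkg may each
//		// contribute a candidate, so callers re-check structural fields
//		// (see ptrTo, ChanOf and MapOf in this file).
//	}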
1713 //go:linkname typelinks reflect.typelinks 1714 func typelinks() (sections []unsafe.Pointer, offset [][]int32) 1715 1716 func rtypeOff(section unsafe.Pointer, off int32) *rtype { 1717 return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0")) 1718 } 1719 1720 // typesByString returns the subslice of typelinks() whose elements have 1721 // the given string representation. 1722 // It may be empty (no known types with that string) or may have 1723 // multiple elements (multiple types with that string). 1724 func typesByString(s string) []*rtype { 1725 sections, offset := typelinks() 1726 var ret []*rtype 1727 1728 for offsI, offs := range offset { 1729 section := sections[offsI] 1730 1731 // We are looking for the first index i where the string becomes >= s. 1732 // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). 1733 i, j := 0, len(offs) 1734 for i < j { 1735 h := i + (j-i)/2 // avoid overflow when computing h 1736 // i ≤ h < j 1737 if !(rtypeOff(section, offs[h]).String() >= s) { 1738 i = h + 1 // preserves f(i-1) == false 1739 } else { 1740 j = h // preserves f(j) == true 1741 } 1742 } 1743 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 1744 1745 // Having found the first, linear scan forward to find the last. 1746 // We could do a second binary search, but the caller is going 1747 // to do a linear scan anyway. 1748 for j := i; j < len(offs); j++ { 1749 typ := rtypeOff(section, offs[j]) 1750 if typ.String() != s { 1751 break 1752 } 1753 ret = append(ret, typ) 1754 } 1755 } 1756 return ret 1757 } 1758 1759 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. 1760 var lookupCache sync.Map // map[cacheKey]*rtype 1761 1762 // A cacheKey is the key for use in the lookupCache. 1763 // Four values describe any of the types we are looking for: 1764 // type kind, one or two subtypes, and an extra integer. 1765 type cacheKey struct { 1766 kind Kind 1767 t1 *rtype 1768 t2 *rtype 1769 extra uintptr 1770 } 1771 1772 // The funcLookupCache caches FuncOf lookups. 1773 // FuncOf does not share the common lookupCache since cacheKey is not 1774 // sufficient to represent functions unambiguously. 1775 var funcLookupCache struct { 1776 sync.Mutex // Guards stores (but not loads) on m. 1777 1778 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. 1779 // Elements of m are append-only and thus safe for concurrent reading. 1780 m sync.Map 1781 } 1782 1783 // ChanOf returns the channel type with the given direction and element type. 1784 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. 1785 // 1786 // The gc runtime imposes a limit of 64 kB on channel element types. 1787 // If t's size is equal to or exceeds this limit, ChanOf panics. 1788 func ChanOf(dir ChanDir, t Type) Type { 1789 typ := t.(*rtype) 1790 1791 // Look in cache. 1792 ckey := cacheKey{Chan, typ, nil, uintptr(dir)} 1793 if ch, ok := lookupCache.Load(ckey); ok { 1794 return ch.(*rtype) 1795 } 1796 1797 // This restriction is imposed by the gc compiler and the runtime. 1798 if typ.size >= 1<<16 { 1799 panic("reflect.ChanOf: element size too large") 1800 } 1801 1802 // Look in known types. 
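	// Build the canonical string form of the requested channel type, then
	// reuse a matching *rtype from the binary's typelinks if one exists.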
1803 var s string 1804 switch dir { 1805 default: 1806 panic("reflect.ChanOf: invalid dir") 1807 case SendDir: 1808 s = "chan<- " + typ.String() 1809 case RecvDir: 1810 s = "<-chan " + typ.String() 1811 case BothDir: 1812 typeStr := typ.String() 1813 if typeStr[0] == '<' { 1814 // typ is recv chan, need parentheses as "<-" associates with leftmost 1815 // chan possible, see: 1816 // * https://golang.org/ref/spec#Channel_types 1817 // * https://github.com/golang/go/issues/39897 1818 s = "chan (" + typeStr + ")" 1819 } else { 1820 s = "chan " + typeStr 1821 } 1822 } 1823 for _, tt := range typesByString(s) { 1824 ch := (*chanType)(unsafe.Pointer(tt)) 1825 if ch.elem == typ && ch.dir == uintptr(dir) { 1826 ti, _ := lookupCache.LoadOrStore(ckey, tt) 1827 return ti.(Type) 1828 } 1829 } 1830 1831 // Make a channel type. 1832 var ichan interface{} = (chan unsafe.Pointer)(nil) 1833 prototype := *(**chanType)(unsafe.Pointer(&ichan)) 1834 ch := *prototype 1835 ch.tflag = tflagRegularMemory 1836 ch.dir = uintptr(dir) 1837 ch.str = resolveReflectName(newName(s, "", false)) 1838 ch.hash = fnv1(typ.hash, 'c', byte(dir)) 1839 ch.elem = typ 1840 1841 ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype) 1842 return ti.(Type) 1843 } 1844 1845 // MapOf returns the map type with the given key and element types. 1846 // For example, if k represents int and e represents string, 1847 // MapOf(k, e) represents map[int]string. 1848 // 1849 // If the key type is not a valid map key type (that is, if it does 1850 // not implement Go's == operator), MapOf panics. 1851 func MapOf(key, elem Type) Type { 1852 ktyp := key.(*rtype) 1853 etyp := elem.(*rtype) 1854 1855 if ktyp.equal == nil { 1856 panic("reflect.MapOf: invalid key type " + ktyp.String()) 1857 } 1858 1859 // Look in cache. 1860 ckey := cacheKey{Map, ktyp, etyp, 0} 1861 if mt, ok := lookupCache.Load(ckey); ok { 1862 return mt.(Type) 1863 } 1864 1865 // Look in known types. 1866 s := "map[" + ktyp.String() + "]" + etyp.String() 1867 for _, tt := range typesByString(s) { 1868 mt := (*mapType)(unsafe.Pointer(tt)) 1869 if mt.key == ktyp && mt.elem == etyp { 1870 ti, _ := lookupCache.LoadOrStore(ckey, tt) 1871 return ti.(Type) 1872 } 1873 } 1874 1875 // Make a map type. 1876 // Note: flag values must match those used in the TMAP case 1877 // in ../cmd/compile/internal/gc/reflect.go:dtypesym. 
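	// Start from the descriptor of a real map type (map[unsafe.Pointer]unsafe.Pointer)
	// and overwrite the key-, element- and bucket-specific fields below.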
1878 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) 1879 mt := **(**mapType)(unsafe.Pointer(&imap)) 1880 mt.str = resolveReflectName(newName(s, "", false)) 1881 mt.tflag = 0 1882 mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash)) 1883 mt.key = ktyp 1884 mt.elem = etyp 1885 mt.bucket = bucketOf(ktyp, etyp) 1886 mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr { 1887 return typehash(ktyp, p, seed) 1888 } 1889 mt.flags = 0 1890 if ktyp.size > maxKeySize { 1891 mt.keysize = uint8(ptrSize) 1892 mt.flags |= 1 // indirect key 1893 } else { 1894 mt.keysize = uint8(ktyp.size) 1895 } 1896 if etyp.size > maxValSize { 1897 mt.valuesize = uint8(ptrSize) 1898 mt.flags |= 2 // indirect value 1899 } else { 1900 mt.valuesize = uint8(etyp.size) 1901 } 1902 mt.bucketsize = uint16(mt.bucket.size) 1903 if isReflexive(ktyp) { 1904 mt.flags |= 4 1905 } 1906 if needKeyUpdate(ktyp) { 1907 mt.flags |= 8 1908 } 1909 if hashMightPanic(ktyp) { 1910 mt.flags |= 16 1911 } 1912 mt.ptrToThis = 0 1913 1914 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype) 1915 return ti.(Type) 1916 } 1917 1918 // TODO(crawshaw): as these funcTypeFixedN structs have no methods, 1919 // they could be defined at runtime using the StructOf function. 1920 type funcTypeFixed4 struct { 1921 funcType 1922 args [4]*rtype 1923 } 1924 type funcTypeFixed8 struct { 1925 funcType 1926 args [8]*rtype 1927 } 1928 type funcTypeFixed16 struct { 1929 funcType 1930 args [16]*rtype 1931 } 1932 type funcTypeFixed32 struct { 1933 funcType 1934 args [32]*rtype 1935 } 1936 type funcTypeFixed64 struct { 1937 funcType 1938 args [64]*rtype 1939 } 1940 type funcTypeFixed128 struct { 1941 funcType 1942 args [128]*rtype 1943 } 1944 1945 // FuncOf returns the function type with the given argument and result types. 1946 // For example if k represents int and e represents string, 1947 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. 1948 // 1949 // The variadic argument controls whether the function is variadic. FuncOf 1950 // panics if the in[len(in)-1] does not represent a slice and variadic is 1951 // true. 1952 func FuncOf(in, out []Type, variadic bool) Type { 1953 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { 1954 panic("reflect.FuncOf: last arg of variadic func must be slice") 1955 } 1956 1957 // Make a func type. 1958 var ifunc interface{} = (func())(nil) 1959 prototype := *(**funcType)(unsafe.Pointer(&ifunc)) 1960 n := len(in) + len(out) 1961 1962 var ft *funcType 1963 var args []*rtype 1964 switch { 1965 case n <= 4: 1966 fixed := new(funcTypeFixed4) 1967 args = fixed.args[:0:len(fixed.args)] 1968 ft = &fixed.funcType 1969 case n <= 8: 1970 fixed := new(funcTypeFixed8) 1971 args = fixed.args[:0:len(fixed.args)] 1972 ft = &fixed.funcType 1973 case n <= 16: 1974 fixed := new(funcTypeFixed16) 1975 args = fixed.args[:0:len(fixed.args)] 1976 ft = &fixed.funcType 1977 case n <= 32: 1978 fixed := new(funcTypeFixed32) 1979 args = fixed.args[:0:len(fixed.args)] 1980 ft = &fixed.funcType 1981 case n <= 64: 1982 fixed := new(funcTypeFixed64) 1983 args = fixed.args[:0:len(fixed.args)] 1984 ft = &fixed.funcType 1985 case n <= 128: 1986 fixed := new(funcTypeFixed128) 1987 args = fixed.args[:0:len(fixed.args)] 1988 ft = &fixed.funcType 1989 default: 1990 panic("reflect.FuncOf: too many arguments") 1991 } 1992 *ft = *prototype 1993 1994 // Build a hash and minimally populate ft. 
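// The hash computed below folds each parameter type's 32-bit hash into an
// FNV-1 accumulator (most significant byte first), then a 'v' marker if the
// function is variadic, then a '.' separator, then each result type's hash.
// It serves only as the funcLookupCache key; identity is still confirmed by
// haveIdenticalUnderlyingType. For example, for func(int) string the hash
// input is the hash bytes of int, then '.', then the hash bytes of string.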
1995 var hash uint32 1996 for _, in := range in { 1997 t := in.(*rtype) 1998 args = append(args, t) 1999 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2000 } 2001 if variadic { 2002 hash = fnv1(hash, 'v') 2003 } 2004 hash = fnv1(hash, '.') 2005 for _, out := range out { 2006 t := out.(*rtype) 2007 args = append(args, t) 2008 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2009 } 2010 if len(args) > 50 { 2011 panic("reflect.FuncOf does not support more than 50 arguments") 2012 } 2013 ft.tflag = 0 2014 ft.hash = hash 2015 ft.inCount = uint16(len(in)) 2016 ft.outCount = uint16(len(out)) 2017 if variadic { 2018 ft.outCount |= 1 << 15 2019 } 2020 2021 // Look in cache. 2022 if ts, ok := funcLookupCache.m.Load(hash); ok { 2023 for _, t := range ts.([]*rtype) { 2024 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2025 return t 2026 } 2027 } 2028 } 2029 2030 // Not in cache, lock and retry. 2031 funcLookupCache.Lock() 2032 defer funcLookupCache.Unlock() 2033 if ts, ok := funcLookupCache.m.Load(hash); ok { 2034 for _, t := range ts.([]*rtype) { 2035 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2036 return t 2037 } 2038 } 2039 } 2040 2041 addToCache := func(tt *rtype) Type { 2042 var rts []*rtype 2043 if rti, ok := funcLookupCache.m.Load(hash); ok { 2044 rts = rti.([]*rtype) 2045 } 2046 funcLookupCache.m.Store(hash, append(rts, tt)) 2047 return tt 2048 } 2049 2050 // Look in known types for the same string representation. 2051 str := funcStr(ft) 2052 for _, tt := range typesByString(str) { 2053 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) { 2054 return addToCache(tt) 2055 } 2056 } 2057 2058 // Populate the remaining fields of ft and store in cache. 2059 ft.str = resolveReflectName(newName(str, "", false)) 2060 ft.ptrToThis = 0 2061 return addToCache(&ft.rtype) 2062 } 2063 2064 // funcStr builds a string representation of a funcType. 2065 func funcStr(ft *funcType) string { 2066 repr := make([]byte, 0, 64) 2067 repr = append(repr, "func("...) 2068 for i, t := range ft.in() { 2069 if i > 0 { 2070 repr = append(repr, ", "...) 2071 } 2072 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2073 repr = append(repr, "..."...) 2074 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2075 } else { 2076 repr = append(repr, t.String()...) 2077 } 2078 } 2079 repr = append(repr, ')') 2080 out := ft.out() 2081 if len(out) == 1 { 2082 repr = append(repr, ' ') 2083 } else if len(out) > 1 { 2084 repr = append(repr, " ("...) 2085 } 2086 for i, t := range out { 2087 if i > 0 { 2088 repr = append(repr, ", "...) 2089 } 2090 repr = append(repr, t.String()...) 2091 } 2092 if len(out) > 1 { 2093 repr = append(repr, ')') 2094 } 2095 return string(repr) 2096 } 2097 2098 // isReflexive reports whether the == operation on the type is reflexive. 2099 // That is, x == x for all values x of type t. 
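// For example, float64 is not reflexive: math.NaN() != math.NaN(), so a map
// key containing a float (directly, via an interface, or inside a struct or
// array) cannot be assumed equal to itself.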
2100 func isReflexive(t *rtype) bool {
2101 switch t.Kind() {
2102 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
2103 return true
2104 case Float32, Float64, Complex64, Complex128, Interface:
2105 return false
2106 case Array:
2107 tt := (*arrayType)(unsafe.Pointer(t))
2108 return isReflexive(tt.elem)
2109 case Struct:
2110 tt := (*structType)(unsafe.Pointer(t))
2111 for _, f := range tt.fields {
2112 if !isReflexive(f.typ) {
2113 return false
2114 }
2115 }
2116 return true
2117 default:
2118 // Func, Map, Slice, Invalid
2119 panic("isReflexive called on non-key type " + t.String())
2120 }
2121 }
2122 
2123 // needKeyUpdate reports whether map overwrites require the key to be copied.
2124 func needKeyUpdate(t *rtype) bool {
2125 switch t.Kind() {
2126 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
2127 return false
2128 case Float32, Float64, Complex64, Complex128, Interface, String:
2129 // Float keys can be updated from +0 to -0.
2130 // String keys can be updated to use a smaller backing store.
2131 // Interfaces might have floats or strings in them.
2132 return true
2133 case Array:
2134 tt := (*arrayType)(unsafe.Pointer(t))
2135 return needKeyUpdate(tt.elem)
2136 case Struct:
2137 tt := (*structType)(unsafe.Pointer(t))
2138 for _, f := range tt.fields {
2139 if needKeyUpdate(f.typ) {
2140 return true
2141 }
2142 }
2143 return false
2144 default:
2145 // Func, Map, Slice, Invalid
2146 panic("needKeyUpdate called on non-key type " + t.String())
2147 }
2148 }
2149 
2150 // hashMightPanic reports whether the hash of a map key of type t might panic.
2151 func hashMightPanic(t *rtype) bool {
2152 switch t.Kind() {
2153 case Interface:
2154 return true
2155 case Array:
2156 tt := (*arrayType)(unsafe.Pointer(t))
2157 return hashMightPanic(tt.elem)
2158 case Struct:
2159 tt := (*structType)(unsafe.Pointer(t))
2160 for _, f := range tt.fields {
2161 if hashMightPanic(f.typ) {
2162 return true
2163 }
2164 }
2165 return false
2166 default:
2167 return false
2168 }
2169 }
2170 
2171 // Make sure these routines stay in sync with ../../runtime/map.go!
2172 // These types exist only for GC, so we only fill out GC relevant info.
2173 // Currently, that's just size and the GC program. We also fill in string
2174 // for possible debugging use.
2175 const (
2176 bucketSize uintptr = 8
2177 maxKeySize uintptr = 128
2178 maxValSize uintptr = 128
2179 )
2180 
2181 func bucketOf(ktyp, etyp *rtype) *rtype {
2182 if ktyp.size > maxKeySize {
2183 ktyp = PtrTo(ktyp).(*rtype)
2184 }
2185 if etyp.size > maxValSize {
2186 etyp = PtrTo(etyp).(*rtype)
2187 }
2188 
2189 // Prepare GC data if any.
2190 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
2191 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
2192 // Note that since the key and value are known to be <= 128 bytes,
2193 // they're guaranteed to have bitmaps instead of GC programs.
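// Illustrative arithmetic for the bound above, assuming 64-bit pointers:
// 8*(1+128+128) + 2*8 = 2072 bytes, 2072/8 = 259 pointer-size words, and
// 259 bits of pointer bitmap round up to 33 bytes.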
2194 var gcdata *byte 2195 var ptrdata uintptr 2196 var overflowPad uintptr 2197 2198 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize 2199 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 { 2200 panic("reflect: bad size computation in MapOf") 2201 } 2202 2203 if ktyp.ptrdata != 0 || etyp.ptrdata != 0 { 2204 nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize 2205 mask := make([]byte, (nptr+7)/8) 2206 base := bucketSize / ptrSize 2207 2208 if ktyp.ptrdata != 0 { 2209 emitGCMask(mask, base, ktyp, bucketSize) 2210 } 2211 base += bucketSize * ktyp.size / ptrSize 2212 2213 if etyp.ptrdata != 0 { 2214 emitGCMask(mask, base, etyp, bucketSize) 2215 } 2216 base += bucketSize * etyp.size / ptrSize 2217 base += overflowPad / ptrSize 2218 2219 word := base 2220 mask[word/8] |= 1 << (word % 8) 2221 gcdata = &mask[0] 2222 ptrdata = (word + 1) * ptrSize 2223 2224 // overflow word must be last 2225 if ptrdata != size { 2226 panic("reflect: bad layout computation in MapOf") 2227 } 2228 } 2229 2230 b := &rtype{ 2231 align: ptrSize, 2232 size: size, 2233 kind: uint8(Struct), 2234 ptrdata: ptrdata, 2235 gcdata: gcdata, 2236 } 2237 if overflowPad > 0 { 2238 b.align = 8 2239 } 2240 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")" 2241 b.str = resolveReflectName(newName(s, "", false)) 2242 return b 2243 } 2244 2245 func (t *rtype) gcSlice(begin, end uintptr) []byte { 2246 return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end] 2247 } 2248 2249 // emitGCMask writes the GC mask for [n]typ into out, starting at bit 2250 // offset base. 2251 func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) { 2252 if typ.kind&kindGCProg != 0 { 2253 panic("reflect: unexpected GC program") 2254 } 2255 ptrs := typ.ptrdata / ptrSize 2256 words := typ.size / ptrSize 2257 mask := typ.gcSlice(0, (ptrs+7)/8) 2258 for j := uintptr(0); j < ptrs; j++ { 2259 if (mask[j/8]>>(j%8))&1 != 0 { 2260 for i := uintptr(0); i < n; i++ { 2261 k := base + i*words + j 2262 out[k/8] |= 1 << (k % 8) 2263 } 2264 } 2265 } 2266 } 2267 2268 // appendGCProg appends the GC program for the first ptrdata bytes of 2269 // typ to dst and returns the extended slice. 2270 func appendGCProg(dst []byte, typ *rtype) []byte { 2271 if typ.kind&kindGCProg != 0 { 2272 // Element has GC program; emit one element. 2273 n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata))) 2274 prog := typ.gcSlice(4, 4+n-1) 2275 return append(dst, prog...) 2276 } 2277 2278 // Element is small with pointer mask; use as literal bits. 2279 ptrs := typ.ptrdata / ptrSize 2280 mask := typ.gcSlice(0, (ptrs+7)/8) 2281 2282 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2283 for ; ptrs > 120; ptrs -= 120 { 2284 dst = append(dst, 120) 2285 dst = append(dst, mask[:15]...) 2286 mask = mask[15:] 2287 } 2288 2289 dst = append(dst, byte(ptrs)) 2290 dst = append(dst, mask...) 2291 return dst 2292 } 2293 2294 // SliceOf returns the slice type with element type t. 2295 // For example, if t represents int, SliceOf(t) represents []int. 2296 func SliceOf(t Type) Type { 2297 typ := t.(*rtype) 2298 2299 // Look in cache. 2300 ckey := cacheKey{Slice, typ, nil, 0} 2301 if slice, ok := lookupCache.Load(ckey); ok { 2302 return slice.(Type) 2303 } 2304 2305 // Look in known types. 
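// For illustration, if typ.String() is "int" the lookup string built below is
// "[]int"; typesByString then returns any already-known types with that
// representation so an existing *sliceType can be reused instead of
// manufacturing a new one.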
2306 s := "[]" + typ.String() 2307 for _, tt := range typesByString(s) { 2308 slice := (*sliceType)(unsafe.Pointer(tt)) 2309 if slice.elem == typ { 2310 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2311 return ti.(Type) 2312 } 2313 } 2314 2315 // Make a slice type. 2316 var islice interface{} = ([]unsafe.Pointer)(nil) 2317 prototype := *(**sliceType)(unsafe.Pointer(&islice)) 2318 slice := *prototype 2319 slice.tflag = 0 2320 slice.str = resolveReflectName(newName(s, "", false)) 2321 slice.hash = fnv1(typ.hash, '[') 2322 slice.elem = typ 2323 slice.ptrToThis = 0 2324 2325 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype) 2326 return ti.(Type) 2327 } 2328 2329 // The structLookupCache caches StructOf lookups. 2330 // StructOf does not share the common lookupCache since we need to pin 2331 // the memory associated with *structTypeFixedN. 2332 var structLookupCache struct { 2333 sync.Mutex // Guards stores (but not loads) on m. 2334 2335 // m is a map[uint32][]Type keyed by the hash calculated in StructOf. 2336 // Elements in m are append-only and thus safe for concurrent reading. 2337 m sync.Map 2338 } 2339 2340 type structTypeUncommon struct { 2341 structType 2342 u uncommonType 2343 } 2344 2345 // isLetter reports whether a given 'rune' is classified as a Letter. 2346 func isLetter(ch rune) bool { 2347 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) 2348 } 2349 2350 // isValidFieldName checks if a string is a valid (struct) field name or not. 2351 // 2352 // According to the language spec, a field name should be an identifier. 2353 // 2354 // identifier = letter { letter | unicode_digit } . 2355 // letter = unicode_letter | "_" . 2356 func isValidFieldName(fieldName string) bool { 2357 for i, c := range fieldName { 2358 if i == 0 && !isLetter(c) { 2359 return false 2360 } 2361 2362 if !(isLetter(c) || unicode.IsDigit(c)) { 2363 return false 2364 } 2365 } 2366 2367 return len(fieldName) > 0 2368 } 2369 2370 // StructOf returns the struct type containing fields. 2371 // The Offset and Index fields are ignored and computed as they would be 2372 // by the compiler. 2373 // 2374 // StructOf currently does not generate wrapper methods for embedded 2375 // fields and panics if passed unexported StructFields. 2376 // These limitations may be lifted in a future version. 2377 func StructOf(fields []StructField) Type { 2378 var ( 2379 hash = fnv1(0, []byte("struct {")...) 2380 size uintptr 2381 typalign uint8 2382 comparable = true 2383 methods []method 2384 2385 fs = make([]structField, len(fields)) 2386 repr = make([]byte, 0, 64) 2387 fset = map[string]struct{}{} // fields' names 2388 2389 hasGCProg = false // records whether a struct-field type has a GCProg 2390 ) 2391 2392 lastzero := uintptr(0) 2393 repr = append(repr, "struct {"...) 
2394 pkgpath := "" 2395 for i, field := range fields { 2396 if field.Name == "" { 2397 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2398 } 2399 if !isValidFieldName(field.Name) { 2400 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2401 } 2402 if field.Type == nil { 2403 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2404 } 2405 f, fpkgpath := runtimeStructField(field) 2406 ft := f.typ 2407 if ft.kind&kindGCProg != 0 { 2408 hasGCProg = true 2409 } 2410 if fpkgpath != "" { 2411 if pkgpath == "" { 2412 pkgpath = fpkgpath 2413 } else if pkgpath != fpkgpath { 2414 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) 2415 } 2416 } 2417 2418 // Update string and hash 2419 name := f.name.name() 2420 hash = fnv1(hash, []byte(name)...) 2421 repr = append(repr, (" " + name)...) 2422 if f.embedded() { 2423 // Embedded field 2424 if f.typ.Kind() == Ptr { 2425 // Embedded ** and *interface{} are illegal 2426 elem := ft.Elem() 2427 if k := elem.Kind(); k == Ptr || k == Interface { 2428 panic("reflect.StructOf: illegal embedded field type " + ft.String()) 2429 } 2430 } 2431 2432 switch f.typ.Kind() { 2433 case Interface: 2434 ift := (*interfaceType)(unsafe.Pointer(ft)) 2435 for im, m := range ift.methods { 2436 if ift.nameOff(m.name).pkgPath() != "" { 2437 // TODO(sbinet). Issue 15924. 2438 panic("reflect: embedded interface with unexported method(s) not implemented") 2439 } 2440 2441 var ( 2442 mtyp = ift.typeOff(m.typ) 2443 ifield = i 2444 imethod = im 2445 ifn Value 2446 tfn Value 2447 ) 2448 2449 if ft.kind&kindDirectIface != 0 { 2450 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2451 var args []Value 2452 var recv = in[0] 2453 if len(in) > 1 { 2454 args = in[1:] 2455 } 2456 return recv.Field(ifield).Method(imethod).Call(args) 2457 }) 2458 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2459 var args []Value 2460 var recv = in[0] 2461 if len(in) > 1 { 2462 args = in[1:] 2463 } 2464 return recv.Field(ifield).Method(imethod).Call(args) 2465 }) 2466 } else { 2467 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2468 var args []Value 2469 var recv = in[0] 2470 if len(in) > 1 { 2471 args = in[1:] 2472 } 2473 return recv.Field(ifield).Method(imethod).Call(args) 2474 }) 2475 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2476 var args []Value 2477 var recv = Indirect(in[0]) 2478 if len(in) > 1 { 2479 args = in[1:] 2480 } 2481 return recv.Field(ifield).Method(imethod).Call(args) 2482 }) 2483 } 2484 2485 methods = append(methods, method{ 2486 name: resolveReflectName(ift.nameOff(m.name)), 2487 mtyp: resolveReflectType(mtyp), 2488 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2489 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2490 }) 2491 } 2492 case Ptr: 2493 ptr := (*ptrType)(unsafe.Pointer(ft)) 2494 if unt := ptr.uncommon(); unt != nil { 2495 if i > 0 && unt.mcount > 0 { 2496 // Issue 15924. 2497 panic("reflect: embedded type with methods not implemented if type is not first field") 2498 } 2499 if len(fields) > 1 { 2500 panic("reflect: embedded type with methods not implemented if there is more than one field") 2501 } 2502 for _, m := range unt.methods() { 2503 mname := ptr.nameOff(m.name) 2504 if mname.pkgPath() != "" { 2505 // TODO(sbinet). 2506 // Issue 15924. 
2507 panic("reflect: embedded interface with unexported method(s) not implemented") 2508 } 2509 methods = append(methods, method{ 2510 name: resolveReflectName(mname), 2511 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2512 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2513 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2514 }) 2515 } 2516 } 2517 if unt := ptr.elem.uncommon(); unt != nil { 2518 for _, m := range unt.methods() { 2519 mname := ptr.nameOff(m.name) 2520 if mname.pkgPath() != "" { 2521 // TODO(sbinet) 2522 // Issue 15924. 2523 panic("reflect: embedded interface with unexported method(s) not implemented") 2524 } 2525 methods = append(methods, method{ 2526 name: resolveReflectName(mname), 2527 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2528 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2529 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2530 }) 2531 } 2532 } 2533 default: 2534 if unt := ft.uncommon(); unt != nil { 2535 if i > 0 && unt.mcount > 0 { 2536 // Issue 15924. 2537 panic("reflect: embedded type with methods not implemented if type is not first field") 2538 } 2539 if len(fields) > 1 && ft.kind&kindDirectIface != 0 { 2540 panic("reflect: embedded type with methods not implemented for non-pointer type") 2541 } 2542 for _, m := range unt.methods() { 2543 mname := ft.nameOff(m.name) 2544 if mname.pkgPath() != "" { 2545 // TODO(sbinet) 2546 // Issue 15924. 2547 panic("reflect: embedded interface with unexported method(s) not implemented") 2548 } 2549 methods = append(methods, method{ 2550 name: resolveReflectName(mname), 2551 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2552 ifn: resolveReflectText(ft.textOff(m.ifn)), 2553 tfn: resolveReflectText(ft.textOff(m.tfn)), 2554 }) 2555 2556 } 2557 } 2558 } 2559 } 2560 if _, dup := fset[name]; dup { 2561 panic("reflect.StructOf: duplicate field " + name) 2562 } 2563 fset[name] = struct{}{} 2564 2565 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2566 2567 repr = append(repr, (" " + ft.String())...) 2568 if f.name.tagLen() > 0 { 2569 hash = fnv1(hash, []byte(f.name.tag())...) 2570 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2571 } 2572 if i < len(fields)-1 { 2573 repr = append(repr, ';') 2574 } 2575 2576 comparable = comparable && (ft.equal != nil) 2577 2578 offset := align(size, uintptr(ft.align)) 2579 if ft.align > typalign { 2580 typalign = ft.align 2581 } 2582 size = offset + ft.size 2583 f.offsetEmbed |= offset << 1 2584 2585 if ft.size == 0 { 2586 lastzero = size 2587 } 2588 2589 fs[i] = f 2590 } 2591 2592 if size > 0 && lastzero == size { 2593 // This is a non-zero sized struct that ends in a 2594 // zero-sized field. We add an extra byte of padding, 2595 // to ensure that taking the address of the final 2596 // zero-sized field can't manufacture a pointer to the 2597 // next object in the heap. See issue 9401. 2598 size++ 2599 } 2600 2601 var typ *structType 2602 var ut *uncommonType 2603 2604 if len(methods) == 0 { 2605 t := new(structTypeUncommon) 2606 typ = &t.structType 2607 ut = &t.u 2608 } else { 2609 // A *rtype representing a struct is followed directly in memory by an 2610 // array of method objects representing the methods attached to the 2611 // struct. To get the same layout for a run time generated type, we 2612 // need an array directly following the uncommonType memory. 2613 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 
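// Concretely, the allocation below is shaped like
//
//	struct {
//		S structType
//		U uncommonType
//		M [len(methods)]method
//	}
//
// so that, assuming no padding between U and M, the method array begins
// immediately after the uncommonType, which is what moff (set below to
// unsafe.Sizeof(uncommonType{})) tells the runtime to expect.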
2614 tt := New(StructOf([]StructField{ 2615 {Name: "S", Type: TypeOf(structType{})}, 2616 {Name: "U", Type: TypeOf(uncommonType{})}, 2617 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, 2618 })) 2619 2620 typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr())) 2621 ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr())) 2622 2623 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods) 2624 } 2625 // TODO(sbinet): Once we allow embedding multiple types, 2626 // methods will need to be sorted like the compiler does. 2627 // TODO(sbinet): Once we allow non-exported methods, we will 2628 // need to compute xcount as the number of exported methods. 2629 ut.mcount = uint16(len(methods)) 2630 ut.xcount = ut.mcount 2631 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2632 2633 if len(fs) > 0 { 2634 repr = append(repr, ' ') 2635 } 2636 repr = append(repr, '}') 2637 hash = fnv1(hash, '}') 2638 str := string(repr) 2639 2640 // Round the size up to be a multiple of the alignment. 2641 size = align(size, uintptr(typalign)) 2642 2643 // Make the struct type. 2644 var istruct interface{} = struct{}{} 2645 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2646 *typ = *prototype 2647 typ.fields = fs 2648 if pkgpath != "" { 2649 typ.pkgPath = newName(pkgpath, "", false) 2650 } 2651 2652 // Look in cache. 2653 if ts, ok := structLookupCache.m.Load(hash); ok { 2654 for _, st := range ts.([]Type) { 2655 t := st.common() 2656 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2657 return t 2658 } 2659 } 2660 } 2661 2662 // Not in cache, lock and retry. 2663 structLookupCache.Lock() 2664 defer structLookupCache.Unlock() 2665 if ts, ok := structLookupCache.m.Load(hash); ok { 2666 for _, st := range ts.([]Type) { 2667 t := st.common() 2668 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2669 return t 2670 } 2671 } 2672 } 2673 2674 addToCache := func(t Type) Type { 2675 var ts []Type 2676 if ti, ok := structLookupCache.m.Load(hash); ok { 2677 ts = ti.([]Type) 2678 } 2679 structLookupCache.m.Store(hash, append(ts, t)) 2680 return t 2681 } 2682 2683 // Look in known types. 2684 for _, t := range typesByString(str) { 2685 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2686 // even if 't' wasn't a structType with methods, we should be ok 2687 // as the 'u uncommonType' field won't be accessed except when 2688 // tflag&tflagUncommon is set. 2689 return addToCache(t) 2690 } 2691 } 2692 2693 typ.str = resolveReflectName(newName(str, "", false)) 2694 typ.tflag = 0 // TODO: set tflagRegularMemory 2695 typ.hash = hash 2696 typ.size = size 2697 typ.ptrdata = typeptrdata(typ.common()) 2698 typ.align = typalign 2699 typ.fieldAlign = typalign 2700 typ.ptrToThis = 0 2701 if len(methods) > 0 { 2702 typ.tflag |= tflagUncommon 2703 } 2704 2705 if hasGCProg { 2706 lastPtrField := 0 2707 for i, ft := range fs { 2708 if ft.typ.pointers() { 2709 lastPtrField = i 2710 } 2711 } 2712 prog := []byte{0, 0, 0, 0} // will be length of prog 2713 var off uintptr 2714 for i, ft := range fs { 2715 if i > lastPtrField { 2716 // gcprog should not include anything for any field after 2717 // the last field that contains pointer data 2718 break 2719 } 2720 if !ft.typ.pointers() { 2721 // Ignore pointerless fields. 2722 continue 2723 } 2724 // Pad to start of this field with zeros. 
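// A brief reminder of the GC program encoding used below (see the runtime's
// GC program documentation): a byte 0x01 followed by a data byte 0x00 emits a
// single literal 0 bit; 0x81 followed by a varint c repeats the previous bit
// c more times; a 0x00 opcode terminates the program.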
2725 if ft.offset() > off { 2726 n := (ft.offset() - off) / ptrSize 2727 prog = append(prog, 0x01, 0x00) // emit a 0 bit 2728 if n > 1 { 2729 prog = append(prog, 0x81) // repeat previous bit 2730 prog = appendVarint(prog, n-1) // n-1 times 2731 } 2732 off = ft.offset() 2733 } 2734 2735 prog = appendGCProg(prog, ft.typ) 2736 off += ft.typ.ptrdata 2737 } 2738 prog = append(prog, 0) 2739 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2740 typ.kind |= kindGCProg 2741 typ.gcdata = &prog[0] 2742 } else { 2743 typ.kind &^= kindGCProg 2744 bv := new(bitVector) 2745 addTypeBits(bv, 0, typ.common()) 2746 if len(bv.data) > 0 { 2747 typ.gcdata = &bv.data[0] 2748 } 2749 } 2750 typ.equal = nil 2751 if comparable { 2752 typ.equal = func(p, q unsafe.Pointer) bool { 2753 for _, ft := range typ.fields { 2754 pi := add(p, ft.offset(), "&x.field safe") 2755 qi := add(q, ft.offset(), "&x.field safe") 2756 if !ft.typ.equal(pi, qi) { 2757 return false 2758 } 2759 } 2760 return true 2761 } 2762 } 2763 2764 switch { 2765 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2766 // structs of 1 direct iface type can be direct 2767 typ.kind |= kindDirectIface 2768 default: 2769 typ.kind &^= kindDirectIface 2770 } 2771 2772 return addToCache(&typ.rtype) 2773 } 2774 2775 // runtimeStructField takes a StructField value passed to StructOf and 2776 // returns both the corresponding internal representation, of type 2777 // structField, and the pkgpath value to use for this field. 2778 func runtimeStructField(field StructField) (structField, string) { 2779 if field.Anonymous && field.PkgPath != "" { 2780 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") 2781 } 2782 2783 exported := field.PkgPath == "" 2784 if exported { 2785 // Best-effort check for misuse. 2786 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. 2787 c := field.Name[0] 2788 if 'a' <= c && c <= 'z' || c == '_' { 2789 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2790 } 2791 } 2792 2793 offsetEmbed := uintptr(0) 2794 if field.Anonymous { 2795 offsetEmbed |= 1 2796 } 2797 2798 resolveReflectType(field.Type.common()) // install in runtime 2799 f := structField{ 2800 name: newName(field.Name, string(field.Tag), exported), 2801 typ: field.Type.common(), 2802 offsetEmbed: offsetEmbed, 2803 } 2804 return f, field.PkgPath 2805 } 2806 2807 // typeptrdata returns the length in bytes of the prefix of t 2808 // containing pointer data. Anything after this offset is scalar data. 2809 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2810 func typeptrdata(t *rtype) uintptr { 2811 switch t.Kind() { 2812 case Struct: 2813 st := (*structType)(unsafe.Pointer(t)) 2814 // find the last field that has pointers. 2815 field := -1 2816 for i := range st.fields { 2817 ft := st.fields[i].typ 2818 if ft.pointers() { 2819 field = i 2820 } 2821 } 2822 if field == -1 { 2823 return 0 2824 } 2825 f := st.fields[field] 2826 return f.offset() + f.typ.ptrdata 2827 2828 default: 2829 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2830 } 2831 } 2832 2833 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2834 const maxPtrmaskBytes = 2048 2835 2836 // ArrayOf returns the array type with the given count and element type. 2837 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2838 // 2839 // If the resulting type would be larger than the available address space, 2840 // ArrayOf panics. 
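// As a usage sketch: ArrayOf(5, TypeOf(0)) returns a Type t with
// t.Kind() == Array, t.Len() == 5, t.Elem() describing int, and
// t.String() == "[5]int".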
2841 func ArrayOf(count int, elem Type) Type { 2842 typ := elem.(*rtype) 2843 2844 // Look in cache. 2845 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2846 if array, ok := lookupCache.Load(ckey); ok { 2847 return array.(Type) 2848 } 2849 2850 // Look in known types. 2851 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2852 for _, tt := range typesByString(s) { 2853 array := (*arrayType)(unsafe.Pointer(tt)) 2854 if array.elem == typ { 2855 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2856 return ti.(Type) 2857 } 2858 } 2859 2860 // Make an array type. 2861 var iarray interface{} = [1]unsafe.Pointer{} 2862 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2863 array := *prototype 2864 array.tflag = typ.tflag & tflagRegularMemory 2865 array.str = resolveReflectName(newName(s, "", false)) 2866 array.hash = fnv1(typ.hash, '[') 2867 for n := uint32(count); n > 0; n >>= 8 { 2868 array.hash = fnv1(array.hash, byte(n)) 2869 } 2870 array.hash = fnv1(array.hash, ']') 2871 array.elem = typ 2872 array.ptrToThis = 0 2873 if typ.size > 0 { 2874 max := ^uintptr(0) / typ.size 2875 if uintptr(count) > max { 2876 panic("reflect.ArrayOf: array size would exceed virtual address space") 2877 } 2878 } 2879 array.size = typ.size * uintptr(count) 2880 if count > 0 && typ.ptrdata != 0 { 2881 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2882 } 2883 array.align = typ.align 2884 array.fieldAlign = typ.fieldAlign 2885 array.len = uintptr(count) 2886 array.slice = SliceOf(elem).(*rtype) 2887 2888 switch { 2889 case typ.ptrdata == 0 || array.size == 0: 2890 // No pointers. 2891 array.gcdata = nil 2892 array.ptrdata = 0 2893 2894 case count == 1: 2895 // In memory, 1-element array looks just like the element. 2896 array.kind |= typ.kind & kindGCProg 2897 array.gcdata = typ.gcdata 2898 array.ptrdata = typ.ptrdata 2899 2900 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2901 // Element is small with pointer mask; array is still small. 2902 // Create direct pointer mask by turning each 1 bit in elem 2903 // into count 1 bits in larger mask. 2904 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2905 emitGCMask(mask, 0, typ, array.len) 2906 array.gcdata = &mask[0] 2907 2908 default: 2909 // Create program that emits one element 2910 // and then repeats to make the array. 2911 prog := []byte{0, 0, 0, 0} // will be length of prog 2912 prog = appendGCProg(prog, typ) 2913 // Pad from ptrdata to size. 2914 elemPtrs := typ.ptrdata / ptrSize 2915 elemWords := typ.size / ptrSize 2916 if elemPtrs < elemWords { 2917 // Emit literal 0 bit, then repeat as needed. 2918 prog = append(prog, 0x01, 0x00) 2919 if elemPtrs+1 < elemWords { 2920 prog = append(prog, 0x81) 2921 prog = appendVarint(prog, elemWords-elemPtrs-1) 2922 } 2923 } 2924 // Repeat count-1 times. 
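// The repeat instruction emitted below names how many of the previous bits to
// repeat: a single byte elemWords|0x80 when elemWords fits in 7 bits,
// otherwise the 0x80 opcode followed by elemWords as a varint. The repeat
// count itself (count-1) follows as a varint, and a trailing 0 byte ends the
// program.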
2925 if elemWords < 0x80 { 2926 prog = append(prog, byte(elemWords|0x80)) 2927 } else { 2928 prog = append(prog, 0x80) 2929 prog = appendVarint(prog, elemWords) 2930 } 2931 prog = appendVarint(prog, uintptr(count)-1) 2932 prog = append(prog, 0) 2933 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2934 array.kind |= kindGCProg 2935 array.gcdata = &prog[0] 2936 array.ptrdata = array.size // overestimate but ok; must match program 2937 } 2938 2939 etyp := typ.common() 2940 esize := etyp.Size() 2941 2942 array.equal = nil 2943 if eequal := etyp.equal; eequal != nil { 2944 array.equal = func(p, q unsafe.Pointer) bool { 2945 for i := 0; i < count; i++ { 2946 pi := arrayAt(p, i, esize, "i < count") 2947 qi := arrayAt(q, i, esize, "i < count") 2948 if !eequal(pi, qi) { 2949 return false 2950 } 2951 2952 } 2953 return true 2954 } 2955 } 2956 2957 switch { 2958 case count == 1 && !ifaceIndir(typ): 2959 // array of 1 direct iface type can be direct 2960 array.kind |= kindDirectIface 2961 default: 2962 array.kind &^= kindDirectIface 2963 } 2964 2965 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype) 2966 return ti.(Type) 2967 } 2968 2969 func appendVarint(x []byte, v uintptr) []byte { 2970 for ; v >= 0x80; v >>= 7 { 2971 x = append(x, byte(v|0x80)) 2972 } 2973 x = append(x, byte(v)) 2974 return x 2975 } 2976 2977 // toType converts from a *rtype to a Type that can be returned 2978 // to the client of package reflect. In gc, the only concern is that 2979 // a nil *rtype must be replaced by a nil Type, but in gccgo this 2980 // function takes care of ensuring that multiple *rtype for the same 2981 // type are coalesced into a single Type. 2982 func toType(t *rtype) Type { 2983 if t == nil { 2984 return nil 2985 } 2986 return t 2987 } 2988 2989 type layoutKey struct { 2990 ftyp *funcType // function signature 2991 rcvr *rtype // receiver type, or nil if none 2992 } 2993 2994 type layoutType struct { 2995 t *rtype 2996 argSize uintptr // size of arguments 2997 retOffset uintptr // offset of return values. 2998 stack *bitVector 2999 framePool *sync.Pool 3000 } 3001 3002 var layoutCache sync.Map // map[layoutKey]layoutType 3003 3004 // funcLayout computes a struct type representing the layout of the 3005 // function arguments and return values for the function type t. 3006 // If rcvr != nil, rcvr specifies the type of the receiver. 3007 // The returned type exists only for GC, so we only fill out GC relevant info. 3008 // Currently, that's just size and the GC program. We also fill in 3009 // the name for possible debugging use. 3010 func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { 3011 if t.Kind() != Func { 3012 panic("reflect: funcLayout of non-func type " + t.String()) 3013 } 3014 if rcvr != nil && rcvr.Kind() == Interface { 3015 panic("reflect: funcLayout with interface receiver " + rcvr.String()) 3016 } 3017 k := layoutKey{t, rcvr} 3018 if lti, ok := layoutCache.Load(k); ok { 3019 lt := lti.(layoutType) 3020 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool 3021 } 3022 3023 // compute gc program & stack bitmap for arguments 3024 ptrmap := new(bitVector) 3025 var offset uintptr 3026 if rcvr != nil { 3027 // Reflect uses the "interface" calling convention for 3028 // methods, where receivers take one word of argument 3029 // space no matter how big they actually are. 
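// The receiver therefore contributes exactly one pointer-sized word to the
// frame. The bit appended below records whether that word is a pointer: it is
// whenever the receiver is stored indirectly (passed as a pointer to the
// value) or is a direct value that itself contains pointers.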
3030 if ifaceIndir(rcvr) || rcvr.pointers() { 3031 ptrmap.append(1) 3032 } else { 3033 ptrmap.append(0) 3034 } 3035 offset += ptrSize 3036 } 3037 for _, arg := range t.in() { 3038 offset += -offset & uintptr(arg.align-1) 3039 addTypeBits(ptrmap, offset, arg) 3040 offset += arg.size 3041 } 3042 argSize = offset 3043 offset += -offset & (ptrSize - 1) 3044 retOffset = offset 3045 for _, res := range t.out() { 3046 offset += -offset & uintptr(res.align-1) 3047 addTypeBits(ptrmap, offset, res) 3048 offset += res.size 3049 } 3050 offset += -offset & (ptrSize - 1) 3051 3052 // build dummy rtype holding gc program 3053 x := &rtype{ 3054 align: ptrSize, 3055 size: offset, 3056 ptrdata: uintptr(ptrmap.n) * ptrSize, 3057 } 3058 if ptrmap.n > 0 { 3059 x.gcdata = &ptrmap.data[0] 3060 } 3061 3062 var s string 3063 if rcvr != nil { 3064 s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")" 3065 } else { 3066 s = "funcargs(" + t.String() + ")" 3067 } 3068 x.str = resolveReflectName(newName(s, "", false)) 3069 3070 // cache result for future callers 3071 framePool = &sync.Pool{New: func() interface{} { 3072 return unsafe_New(x) 3073 }} 3074 lti, _ := layoutCache.LoadOrStore(k, layoutType{ 3075 t: x, 3076 argSize: argSize, 3077 retOffset: retOffset, 3078 stack: ptrmap, 3079 framePool: framePool, 3080 }) 3081 lt := lti.(layoutType) 3082 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool 3083 } 3084 3085 // ifaceIndir reports whether t is stored indirectly in an interface value. 3086 func ifaceIndir(t *rtype) bool { 3087 return t.kind&kindDirectIface == 0 3088 } 3089 3090 // Note: this type must agree with runtime.bitvector. 3091 type bitVector struct { 3092 n uint32 // number of bits 3093 data []byte 3094 } 3095 3096 // append a bit to the bitmap. 3097 func (bv *bitVector) append(bit uint8) { 3098 if bv.n%8 == 0 { 3099 bv.data = append(bv.data, 0) 3100 } 3101 bv.data[bv.n/8] |= bit << (bv.n % 8) 3102 bv.n++ 3103 } 3104 3105 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3106 if t.ptrdata == 0 { 3107 return 3108 } 3109 3110 switch Kind(t.kind & kindMask) { 3111 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: 3112 // 1 pointer at start of representation 3113 for bv.n < uint32(offset/uintptr(ptrSize)) { 3114 bv.append(0) 3115 } 3116 bv.append(1) 3117 3118 case Interface: 3119 // 2 pointers 3120 for bv.n < uint32(offset/uintptr(ptrSize)) { 3121 bv.append(0) 3122 } 3123 bv.append(1) 3124 bv.append(1) 3125 3126 case Array: 3127 // repeat inner type 3128 tt := (*arrayType)(unsafe.Pointer(t)) 3129 for i := 0; i < int(tt.len); i++ { 3130 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3131 } 3132 3133 case Struct: 3134 // apply fields 3135 tt := (*structType)(unsafe.Pointer(t)) 3136 for i := range tt.fields { 3137 f := &tt.fields[i] 3138 addTypeBits(bv, offset+f.offset(), f.typ) 3139 } 3140 } 3141 } 3142 3143 func InternalType(t Type) *rtype { 3144 return t.(*rtype) 3145 }
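// InternalType (defined above) appears to be an xtypes-specific accessor
// rather than part of the standard reflect API: it exposes the underlying
// *rtype of a Type produced by this package. A usage sketch, assuming a Type
// obtained from this package:
//
//	rt := InternalType(TypeOf(0)) // the *rtype describing int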