github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/reflect/type.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package reflect implements run-time reflection, allowing a program to 6 // manipulate objects with arbitrary types. The typical use is to take a value 7 // with static type interface{} and extract its dynamic type information by 8 // calling TypeOf, which returns a Type. 9 // 10 // A call to ValueOf returns a Value representing the run-time data. 11 // Zero takes a Type and returns a Value representing a zero value 12 // for that type. 13 // 14 // See "The Laws of Reflection" for an introduction to reflection in Go: 15 // https://golang.org/doc/articles/laws_of_reflection.html 16 package reflect 17 18 import ( 19 "internal/unsafeheader" 20 "strconv" 21 "sync" 22 "unicode" 23 "unicode/utf8" 24 "unsafe" 25 ) 26 27 // Type is the representation of a Go type. 28 // 29 // Not all methods apply to all kinds of types. Restrictions, 30 // if any, are noted in the documentation for each method. 31 // Use the Kind method to find out the kind of type before 32 // calling kind-specific methods. Calling a method 33 // inappropriate to the kind of type causes a run-time panic. 34 // 35 // Type values are comparable, such as with the == operator, 36 // so they can be used as map keys. 37 // Two Type values are equal if they represent identical types. 38 type Type interface { 39 // Methods applicable to all types. 40 41 // Align returns the alignment in bytes of a value of 42 // this type when allocated in memory. 43 Align() int 44 45 // FieldAlign returns the alignment in bytes of a value of 46 // this type when used as a field in a struct. 47 FieldAlign() int 48 49 // Method returns the i'th method in the type's method set. 50 // It panics if i is not in the range [0, NumMethod()). 
51 // 52 // For a non-interface type T or *T, the returned Method's Type and Func 53 // fields describe a function whose first argument is the receiver, 54 // and only exported methods are accessible. 55 // 56 // For an interface type, the returned Method's Type field gives the 57 // method signature, without a receiver, and the Func field is nil. 58 // 59 // Methods are sorted in lexicographic order. 60 Method(int) Method 61 62 // MethodByName returns the method with that name in the type's 63 // method set and a boolean indicating if the method was found. 64 // 65 // For a non-interface type T or *T, the returned Method's Type and Func 66 // fields describe a function whose first argument is the receiver. 67 // 68 // For an interface type, the returned Method's Type field gives the 69 // method signature, without a receiver, and the Func field is nil. 70 MethodByName(string) (Method, bool) 71 72 // NumMethod returns the number of methods accessible using Method. 73 // 74 // Note that NumMethod counts unexported methods only for interface types. 75 NumMethod() int 76 77 // Name returns the type's name within its package for a defined type. 78 // For other (non-defined) types it returns the empty string. 79 Name() string 80 81 // PkgPath returns a defined type's package path, that is, the import path 82 // that uniquely identifies the package, such as "encoding/base64". 83 // If the type was predeclared (string, error) or not defined (*T, struct{}, 84 // []int, or A where A is an alias for a non-defined type), the package path 85 // will be the empty string. 86 PkgPath() string 87 88 // Size returns the number of bytes needed to store 89 // a value of the given type; it is analogous to unsafe.Sizeof. 90 Size() uintptr 91 92 // String returns a string representation of the type. 93 // The string representation may use shortened package names 94 // (e.g., base64 instead of "encoding/base64") and is not 95 // guaranteed to be unique among types. 
To test for type identity, 96 // compare the Types directly. 97 String() string 98 99 // Kind returns the specific kind of this type. 100 Kind() Kind 101 102 // Implements reports whether the type implements the interface type u. 103 Implements(u Type) bool 104 105 // AssignableTo reports whether a value of the type is assignable to type u. 106 AssignableTo(u Type) bool 107 108 // ConvertibleTo reports whether a value of the type is convertible to type u. 109 ConvertibleTo(u Type) bool 110 111 // Comparable reports whether values of this type are comparable. 112 Comparable() bool 113 114 // Methods applicable only to some types, depending on Kind. 115 // The methods allowed for each kind are: 116 // 117 // Int*, Uint*, Float*, Complex*: Bits 118 // Array: Elem, Len 119 // Chan: ChanDir, Elem 120 // Func: In, NumIn, Out, NumOut, IsVariadic. 121 // Map: Key, Elem 122 // Ptr: Elem 123 // Slice: Elem 124 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 125 126 // Bits returns the size of the type in bits. 127 // It panics if the type's Kind is not one of the 128 // sized or unsized Int, Uint, Float, or Complex kinds. 129 Bits() int 130 131 // ChanDir returns a channel type's direction. 132 // It panics if the type's Kind is not Chan. 133 ChanDir() ChanDir 134 135 // IsVariadic reports whether a function type's final input parameter 136 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 137 // implicit actual type []T. 138 // 139 // For concreteness, if t represents func(x int, y ... float64), then 140 // 141 // t.NumIn() == 2 142 // t.In(0) is the reflect.Type for "int" 143 // t.In(1) is the reflect.Type for "[]float64" 144 // t.IsVariadic() == true 145 // 146 // IsVariadic panics if the type's Kind is not Func. 147 IsVariadic() bool 148 149 // Elem returns a type's element type. 150 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice. 151 Elem() Type 152 153 // Field returns a struct type's i'th field. 
154 // It panics if the type's Kind is not Struct. 155 // It panics if i is not in the range [0, NumField()). 156 Field(i int) StructField 157 158 // FieldByIndex returns the nested field corresponding 159 // to the index sequence. It is equivalent to calling Field 160 // successively for each index i. 161 // It panics if the type's Kind is not Struct. 162 FieldByIndex(index []int) StructField 163 164 // FieldByName returns the struct field with the given name 165 // and a boolean indicating if the field was found. 166 FieldByName(name string) (StructField, bool) 167 168 // FieldByNameFunc returns the struct field with a name 169 // that satisfies the match function and a boolean indicating if 170 // the field was found. 171 // 172 // FieldByNameFunc considers the fields in the struct itself 173 // and then the fields in any embedded structs, in breadth first order, 174 // stopping at the shallowest nesting depth containing one or more 175 // fields satisfying the match function. If multiple fields at that depth 176 // satisfy the match function, they cancel each other 177 // and FieldByNameFunc returns no match. 178 // This behavior mirrors Go's handling of name lookup in 179 // structs containing embedded fields. 180 FieldByNameFunc(match func(string) bool) (StructField, bool) 181 182 // In returns the type of a function type's i'th input parameter. 183 // It panics if the type's Kind is not Func. 184 // It panics if i is not in the range [0, NumIn()). 185 In(i int) Type 186 187 // Key returns a map type's key type. 188 // It panics if the type's Kind is not Map. 189 Key() Type 190 191 // Len returns an array type's length. 192 // It panics if the type's Kind is not Array. 193 Len() int 194 195 // NumField returns a struct type's field count. 196 // It panics if the type's Kind is not Struct. 197 NumField() int 198 199 // NumIn returns a function type's input parameter count. 200 // It panics if the type's Kind is not Func. 
201 NumIn() int 202 203 // NumOut returns a function type's output parameter count. 204 // It panics if the type's Kind is not Func. 205 NumOut() int 206 207 // Out returns the type of a function type's i'th output parameter. 208 // It panics if the type's Kind is not Func. 209 // It panics if i is not in the range [0, NumOut()). 210 Out(i int) Type 211 212 common() *rtype 213 uncommon() *uncommonType 214 } 215 216 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 217 // if the names are equal, even if they are unexported names originating 218 // in different packages. The practical effect of this is that the result of 219 // t.FieldByName("x") is not well defined if the struct type t contains 220 // multiple fields named x (embedded from different packages). 221 // FieldByName may return one of the fields named x or may report that there are none. 222 // See https://golang.org/issue/4876 for more details. 223 224 /* 225 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). 226 * A few are known to ../runtime/type.go to convey to debuggers. 227 * They are also known to ../runtime/type.go. 228 */ 229 230 // A Kind represents the specific kind of type that a Type represents. 231 // The zero Kind is not a valid kind. 232 type Kind uint 233 234 const ( 235 Invalid Kind = iota 236 Bool 237 Int 238 Int8 239 Int16 240 Int32 241 Int64 242 Uint 243 Uint8 244 Uint16 245 Uint32 246 Uint64 247 Uintptr 248 Float32 249 Float64 250 Complex64 251 Complex128 252 Array 253 Chan 254 Func 255 Interface 256 Map 257 Ptr 258 Slice 259 String 260 Struct 261 UnsafePointer 262 ) 263 264 // tflag is used by an rtype to signal what extra type information is 265 // available in the memory directly following the rtype value. 
266 // 267 // tflag values must be kept in sync with copies in: 268 // cmd/compile/internal/gc/reflect.go 269 // cmd/link/internal/ld/decodesym.go 270 // runtime/type.go 271 type tflag uint8 272 273 const ( 274 // tflagUncommon means that there is a pointer, *uncommonType, 275 // just beyond the outer type structure. 276 // 277 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 278 // then t has uncommonType data and it can be accessed as: 279 // 280 // type tUncommon struct { 281 // structType 282 // u uncommonType 283 // } 284 // u := &(*tUncommon)(unsafe.Pointer(t)).u 285 tflagUncommon tflag = 1 << 0 286 287 // tflagExtraStar means the name in the str field has an 288 // extraneous '*' prefix. This is because for most types T in 289 // a program, the type *T also exists and reusing the str data 290 // saves binary size. 291 tflagExtraStar tflag = 1 << 1 292 293 // tflagNamed means the type has a name. 294 tflagNamed tflag = 1 << 2 295 296 // tflagRegularMemory means that equal and hash functions can treat 297 // this type as a single region of t.size bytes. 298 tflagRegularMemory tflag = 1 << 3 299 ) 300 301 // rtype is the common implementation of most values. 302 // It is embedded in other struct types. 303 // 304 // rtype must be kept in sync with ../runtime/type.go:/^type._type. 305 type rtype struct { 306 size uintptr 307 ptrdata uintptr // number of bytes in the type that can contain pointers 308 hash uint32 // hash of type; avoids computation in hash tables 309 tflag tflag // extra type information flags 310 align uint8 // alignment of variable with this type 311 fieldAlign uint8 // alignment of struct field with this type 312 kind uint8 // enumeration for C 313 // function for comparing objects of this type 314 // (ptr to object A, ptr to object B) -> ==? 
315 equal func(unsafe.Pointer, unsafe.Pointer) bool 316 gcdata *byte // garbage collection data 317 str nameOff // string form 318 ptrToThis typeOff // type for pointer to this type, may be zero 319 } 320 321 // Method on non-interface type 322 type method struct { 323 name nameOff // name of method 324 mtyp typeOff // method type (without receiver) 325 ifn textOff // fn used in interface call (one-word receiver) 326 tfn textOff // fn used for normal method call 327 } 328 329 // uncommonType is present only for defined types or types with methods 330 // (if T is a defined type, the uncommonTypes for T and *T have methods). 331 // Using a pointer to this struct reduces the overall size required 332 // to describe a non-defined type with no methods. 333 type uncommonType struct { 334 pkgPath nameOff // import path; empty for built-in types like int, string 335 mcount uint16 // number of methods 336 xcount uint16 // number of exported methods 337 moff uint32 // offset from this uncommontype to [mcount]method 338 _ uint32 // unused 339 } 340 341 // ChanDir represents a channel type's direction. 342 type ChanDir int 343 344 const ( 345 RecvDir ChanDir = 1 << iota // <-chan 346 SendDir // chan<- 347 BothDir = RecvDir | SendDir // chan 348 ) 349 350 // arrayType represents a fixed array type. 351 type arrayType struct { 352 rtype 353 elem *rtype // array element type 354 slice *rtype // slice type 355 len uintptr 356 } 357 358 // chanType represents a channel type. 359 type chanType struct { 360 rtype 361 elem *rtype // channel element type 362 dir uintptr // channel direction (ChanDir) 363 } 364 365 // funcType represents a function type. 366 // 367 // A *rtype for each in and out parameter is stored in an array that 368 // directly follows the funcType (and possibly its uncommonType). 
So 369 // a function type with one method, one input, and one output is: 370 // 371 // struct { 372 // funcType 373 // uncommonType 374 // [2]*rtype // [0] is in, [1] is out 375 // } 376 type funcType struct { 377 rtype 378 inCount uint16 379 outCount uint16 // top bit is set if last input parameter is ... 380 } 381 382 // imethod represents a method on an interface type 383 type imethod struct { 384 name nameOff // name of method 385 typ typeOff // .(*FuncType) underneath 386 } 387 388 // interfaceType represents an interface type. 389 type interfaceType struct { 390 rtype 391 pkgPath name // import path 392 methods []imethod // sorted by hash 393 } 394 395 // mapType represents a map type. 396 type mapType struct { 397 rtype 398 key *rtype // map key type 399 elem *rtype // map element (value) type 400 bucket *rtype // internal bucket structure 401 // function for hashing keys (ptr to key, seed) -> hash 402 hasher func(unsafe.Pointer, uintptr) uintptr 403 keysize uint8 // size of key slot 404 valuesize uint8 // size of value slot 405 bucketsize uint16 // size of bucket 406 flags uint32 407 } 408 409 // ptrType represents a pointer type. 410 type ptrType struct { 411 rtype 412 elem *rtype // pointer element (pointed at) type 413 } 414 415 // sliceType represents a slice type. 416 type sliceType struct { 417 rtype 418 elem *rtype // slice element type 419 } 420 421 // Struct field 422 type structField struct { 423 name name // name is always non-empty 424 typ *rtype // type of field 425 offsetEmbed uintptr // byte offset of field<<1 | isEmbedded 426 } 427 428 func (f *structField) offset() uintptr { 429 return f.offsetEmbed >> 1 430 } 431 432 func (f *structField) embedded() bool { 433 return f.offsetEmbed&1 != 0 434 } 435 436 // structType represents a struct type. 437 type structType struct { 438 rtype 439 pkgPath name 440 fields []structField // sorted by offset 441 } 442 443 // name is an encoded type name with optional extra data. 
444 // 445 // The first byte is a bit field containing: 446 // 447 // 1<<0 the name is exported 448 // 1<<1 tag data follows the name 449 // 1<<2 pkgPath nameOff follows the name and tag 450 // 451 // The next two bytes are the data length: 452 // 453 // l := uint16(data[1])<<8 | uint16(data[2]) 454 // 455 // Bytes [3:3+l] are the string data. 456 // 457 // If tag data follows then bytes 3+l and 3+l+1 are the tag length, 458 // with the data following. 459 // 460 // If the import path follows, then 4 bytes at the end of 461 // the data form a nameOff. The import path is only set for concrete 462 // methods that are defined in a different package than their type. 463 // 464 // If a name starts with "*", then the exported bit represents 465 // whether the pointed to type is exported. 466 type name struct { 467 bytes *byte 468 } 469 470 func (n name) data(off int, whySafe string) *byte { 471 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe)) 472 } 473 474 func (n name) isExported() bool { 475 return (*n.bytes)&(1<<0) != 0 476 } 477 478 func (n name) nameLen() int { 479 return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field"))) 480 } 481 482 func (n name) tagLen() int { 483 if *n.data(0, "name flag field")&(1<<1) == 0 { 484 return 0 485 } 486 off := 3 + n.nameLen() 487 return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field"))) 488 } 489 490 func (n name) name() (s string) { 491 if n.bytes == nil { 492 return 493 } 494 b := (*[4]byte)(unsafe.Pointer(n.bytes)) 495 496 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 497 hdr.Data = unsafe.Pointer(&b[3]) 498 hdr.Len = int(b[1])<<8 | int(b[2]) 499 return s 500 } 501 502 func (n name) tag() (s string) { 503 tl := n.tagLen() 504 if tl == 0 { 505 return "" 506 } 507 nl := n.nameLen() 508 hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) 509 hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string")) 510 hdr.Len = tl 511 return s 512 
} 513 514 func (n name) pkgPath() string { 515 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 { 516 return "" 517 } 518 off := 3 + n.nameLen() 519 if tl := n.tagLen(); tl > 0 { 520 off += 2 + tl 521 } 522 var nameOff int32 523 // Note that this field may not be aligned in memory, 524 // so we cannot use a direct int32 assignment here. 525 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:]) 526 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 527 return pkgPathName.name() 528 } 529 530 func newName(n, tag string, exported bool) name { 531 if len(n) > 1<<16-1 { 532 panic("reflect.nameFrom: name too long: " + n) 533 } 534 if len(tag) > 1<<16-1 { 535 panic("reflect.nameFrom: tag too long: " + tag) 536 } 537 538 var bits byte 539 l := 1 + 2 + len(n) 540 if exported { 541 bits |= 1 << 0 542 } 543 if len(tag) > 0 { 544 l += 2 + len(tag) 545 bits |= 1 << 1 546 } 547 548 b := make([]byte, l) 549 b[0] = bits 550 b[1] = uint8(len(n) >> 8) 551 b[2] = uint8(len(n)) 552 copy(b[3:], n) 553 if len(tag) > 0 { 554 tb := b[3+len(n):] 555 tb[0] = uint8(len(tag) >> 8) 556 tb[1] = uint8(len(tag)) 557 copy(tb[2:], tag) 558 } 559 560 return name{bytes: &b[0]} 561 } 562 563 /* 564 * The compiler knows the exact layout of all the data structures above. 565 * The compiler does not know about the data structures and methods below. 566 */ 567 568 // Method represents a single method. 569 type Method struct { 570 // Name is the method name. 571 // PkgPath is the package path that qualifies a lower case (unexported) 572 // method name. It is empty for upper case (exported) method names. 573 // The combination of PkgPath and Name uniquely identifies a method 574 // in a method set. 
575 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 576 Name string 577 PkgPath string 578 579 Type Type // method type 580 Func Value // func with receiver as first argument 581 Index int // index for Type.Method 582 } 583 584 const ( 585 kindDirectIface = 1 << 5 586 kindGCProg = 1 << 6 // Type.gc points to GC program 587 kindMask = (1 << 5) - 1 588 ) 589 590 // String returns the name of k. 591 func (k Kind) String() string { 592 if int(k) < len(kindNames) { 593 return kindNames[k] 594 } 595 return "kind" + strconv.Itoa(int(k)) 596 } 597 598 var kindNames = []string{ 599 Invalid: "invalid", 600 Bool: "bool", 601 Int: "int", 602 Int8: "int8", 603 Int16: "int16", 604 Int32: "int32", 605 Int64: "int64", 606 Uint: "uint", 607 Uint8: "uint8", 608 Uint16: "uint16", 609 Uint32: "uint32", 610 Uint64: "uint64", 611 Uintptr: "uintptr", 612 Float32: "float32", 613 Float64: "float64", 614 Complex64: "complex64", 615 Complex128: "complex128", 616 Array: "array", 617 Chan: "chan", 618 Func: "func", 619 Interface: "interface", 620 Map: "map", 621 Ptr: "ptr", 622 Slice: "slice", 623 String: "string", 624 Struct: "struct", 625 UnsafePointer: "unsafe.Pointer", 626 } 627 628 func (t *uncommonType) methods() []method { 629 if t.mcount == 0 { 630 return nil 631 } 632 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount] 633 } 634 635 func (t *uncommonType) exportedMethods() []method { 636 if t.xcount == 0 { 637 return nil 638 } 639 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount] 640 } 641 642 // resolveNameOff resolves a name offset from a base pointer. 643 // The (*rtype).nameOff method is a convenience wrapper for this function. 644 // Implemented in the runtime package. 645 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 646 647 // resolveTypeOff resolves an *rtype offset from a base type. 
648 // The (*rtype).typeOff method is a convenience wrapper for this function. 649 // Implemented in the runtime package. 650 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 651 652 // resolveTextOff resolves a function pointer offset from a base type. 653 // The (*rtype).textOff method is a convenience wrapper for this function. 654 // Implemented in the runtime package. 655 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 656 657 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 658 // It returns a new ID that can be used as a typeOff or textOff, and will 659 // be resolved correctly. Implemented in the runtime package. 660 func addReflectOff(ptr unsafe.Pointer) int32 661 662 // resolveReflectName adds a name to the reflection lookup map in the runtime. 663 // It returns a new nameOff that can be used to refer to the pointer. 664 func resolveReflectName(n name) nameOff { 665 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 666 } 667 668 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 669 // It returns a new typeOff that can be used to refer to the pointer. 670 func resolveReflectType(t *rtype) typeOff { 671 return typeOff(addReflectOff(unsafe.Pointer(t))) 672 } 673 674 // resolveReflectText adds a function pointer to the reflection lookup map in 675 // the runtime. It returns a new textOff that can be used to refer to the 676 // pointer. 
677 func resolveReflectText(ptr unsafe.Pointer) textOff { 678 return textOff(addReflectOff(ptr)) 679 } 680 681 type nameOff int32 // offset to a name 682 type typeOff int32 // offset to an *rtype 683 type textOff int32 // offset from top of text section 684 685 func (t *rtype) nameOff(off nameOff) name { 686 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 687 } 688 689 func (t *rtype) typeOff(off typeOff) *rtype { 690 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 691 } 692 693 func (t *rtype) textOff(off textOff) unsafe.Pointer { 694 return resolveTextOff(unsafe.Pointer(t), int32(off)) 695 } 696 697 func (t *rtype) uncommon() *uncommonType { 698 if t.tflag&tflagUncommon == 0 { 699 return nil 700 } 701 switch t.Kind() { 702 case Struct: 703 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 704 case Ptr: 705 type u struct { 706 ptrType 707 u uncommonType 708 } 709 return &(*u)(unsafe.Pointer(t)).u 710 case Func: 711 type u struct { 712 funcType 713 u uncommonType 714 } 715 return &(*u)(unsafe.Pointer(t)).u 716 case Slice: 717 type u struct { 718 sliceType 719 u uncommonType 720 } 721 return &(*u)(unsafe.Pointer(t)).u 722 case Array: 723 type u struct { 724 arrayType 725 u uncommonType 726 } 727 return &(*u)(unsafe.Pointer(t)).u 728 case Chan: 729 type u struct { 730 chanType 731 u uncommonType 732 } 733 return &(*u)(unsafe.Pointer(t)).u 734 case Map: 735 type u struct { 736 mapType 737 u uncommonType 738 } 739 return &(*u)(unsafe.Pointer(t)).u 740 case Interface: 741 type u struct { 742 interfaceType 743 u uncommonType 744 } 745 return &(*u)(unsafe.Pointer(t)).u 746 default: 747 type u struct { 748 rtype 749 u uncommonType 750 } 751 return &(*u)(unsafe.Pointer(t)).u 752 } 753 } 754 755 func (t *rtype) String() string { 756 s := t.nameOff(t.str).name() 757 if t.tflag&tflagExtraStar != 0 { 758 return s[1:] 759 } 760 return s 761 } 762 763 func (t *rtype) Size() uintptr { return t.size } 764 765 func (t *rtype) Bits() int { 766 if t 
== nil { 767 panic("reflect: Bits of nil Type") 768 } 769 k := t.Kind() 770 if k < Int || k > Complex128 { 771 panic("reflect: Bits of non-arithmetic Type " + t.String()) 772 } 773 return int(t.size) * 8 774 } 775 776 func (t *rtype) Align() int { return int(t.align) } 777 778 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 779 780 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 781 782 func (t *rtype) pointers() bool { return t.ptrdata != 0 } 783 784 func (t *rtype) common() *rtype { return t } 785 786 func (t *rtype) exportedMethods() []method { 787 ut := t.uncommon() 788 if ut == nil { 789 return nil 790 } 791 return ut.exportedMethods() 792 } 793 794 func (t *rtype) NumMethod() int { 795 if t.Kind() == Interface { 796 tt := (*interfaceType)(unsafe.Pointer(t)) 797 return tt.NumMethod() 798 } 799 return len(t.exportedMethods()) 800 } 801 802 func (t *rtype) Method(i int) (m Method) { 803 if t.Kind() == Interface { 804 tt := (*interfaceType)(unsafe.Pointer(t)) 805 return tt.Method(i) 806 } 807 methods := t.exportedMethods() 808 if i < 0 || i >= len(methods) { 809 panic("reflect: Method index out of range") 810 } 811 p := methods[i] 812 pname := t.nameOff(p.name) 813 m.Name = pname.name() 814 fl := flag(Func) 815 mtyp := t.typeOff(p.mtyp) 816 ft := (*funcType)(unsafe.Pointer(mtyp)) 817 in := make([]Type, 0, 1+len(ft.in())) 818 in = append(in, t) 819 for _, arg := range ft.in() { 820 in = append(in, arg) 821 } 822 out := make([]Type, 0, len(ft.out())) 823 for _, ret := range ft.out() { 824 out = append(out, ret) 825 } 826 mt := FuncOf(in, out, ft.IsVariadic()) 827 m.Type = mt 828 tfn := t.textOff(p.tfn) 829 fn := unsafe.Pointer(&tfn) 830 m.Func = Value{mt.(*rtype), fn, fl} 831 832 m.Index = i 833 return m 834 } 835 836 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 837 if t.Kind() == Interface { 838 tt := (*interfaceType)(unsafe.Pointer(t)) 839 return tt.MethodByName(name) 840 } 841 ut := t.uncommon() 842 if ut == nil { 
843 return Method{}, false 844 } 845 // TODO(mdempsky): Binary search. 846 for i, p := range ut.exportedMethods() { 847 if t.nameOff(p.name).name() == name { 848 return t.Method(i), true 849 } 850 } 851 return Method{}, false 852 } 853 854 func (t *rtype) PkgPath() string { 855 if t.tflag&tflagNamed == 0 { 856 return "" 857 } 858 ut := t.uncommon() 859 if ut == nil { 860 return "" 861 } 862 return t.nameOff(ut.pkgPath).name() 863 } 864 865 func (t *rtype) hasName() bool { 866 return t.tflag&tflagNamed != 0 867 } 868 869 func (t *rtype) Name() string { 870 if !t.hasName() { 871 return "" 872 } 873 s := t.String() 874 i := len(s) - 1 875 for i >= 0 && s[i] != '.' { 876 i-- 877 } 878 return s[i+1:] 879 } 880 881 func (t *rtype) ChanDir() ChanDir { 882 if t.Kind() != Chan { 883 panic("reflect: ChanDir of non-chan type " + t.String()) 884 } 885 tt := (*chanType)(unsafe.Pointer(t)) 886 return ChanDir(tt.dir) 887 } 888 889 func (t *rtype) IsVariadic() bool { 890 if t.Kind() != Func { 891 panic("reflect: IsVariadic of non-func type " + t.String()) 892 } 893 tt := (*funcType)(unsafe.Pointer(t)) 894 return tt.outCount&(1<<15) != 0 895 } 896 897 func (t *rtype) Elem() Type { 898 switch t.Kind() { 899 case Array: 900 tt := (*arrayType)(unsafe.Pointer(t)) 901 return toType(tt.elem) 902 case Chan: 903 tt := (*chanType)(unsafe.Pointer(t)) 904 return toType(tt.elem) 905 case Map: 906 tt := (*mapType)(unsafe.Pointer(t)) 907 return toType(tt.elem) 908 case Ptr: 909 tt := (*ptrType)(unsafe.Pointer(t)) 910 return toType(tt.elem) 911 case Slice: 912 tt := (*sliceType)(unsafe.Pointer(t)) 913 return toType(tt.elem) 914 } 915 panic("reflect: Elem of invalid type " + t.String()) 916 } 917 918 func (t *rtype) Field(i int) StructField { 919 if t.Kind() != Struct { 920 panic("reflect: Field of non-struct type " + t.String()) 921 } 922 tt := (*structType)(unsafe.Pointer(t)) 923 return tt.Field(i) 924 } 925 926 func (t *rtype) FieldByIndex(index []int) StructField { 927 if t.Kind() != Struct { 
928 panic("reflect: FieldByIndex of non-struct type " + t.String()) 929 } 930 tt := (*structType)(unsafe.Pointer(t)) 931 return tt.FieldByIndex(index) 932 } 933 934 func (t *rtype) FieldByName(name string) (StructField, bool) { 935 if t.Kind() != Struct { 936 panic("reflect: FieldByName of non-struct type " + t.String()) 937 } 938 tt := (*structType)(unsafe.Pointer(t)) 939 return tt.FieldByName(name) 940 } 941 942 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 943 if t.Kind() != Struct { 944 panic("reflect: FieldByNameFunc of non-struct type " + t.String()) 945 } 946 tt := (*structType)(unsafe.Pointer(t)) 947 return tt.FieldByNameFunc(match) 948 } 949 950 func (t *rtype) In(i int) Type { 951 if t.Kind() != Func { 952 panic("reflect: In of non-func type " + t.String()) 953 } 954 tt := (*funcType)(unsafe.Pointer(t)) 955 return toType(tt.in()[i]) 956 } 957 958 func (t *rtype) Key() Type { 959 if t.Kind() != Map { 960 panic("reflect: Key of non-map type " + t.String()) 961 } 962 tt := (*mapType)(unsafe.Pointer(t)) 963 return toType(tt.key) 964 } 965 966 func (t *rtype) Len() int { 967 if t.Kind() != Array { 968 panic("reflect: Len of non-array type " + t.String()) 969 } 970 tt := (*arrayType)(unsafe.Pointer(t)) 971 return int(tt.len) 972 } 973 974 func (t *rtype) NumField() int { 975 if t.Kind() != Struct { 976 panic("reflect: NumField of non-struct type " + t.String()) 977 } 978 tt := (*structType)(unsafe.Pointer(t)) 979 return len(tt.fields) 980 } 981 982 func (t *rtype) NumIn() int { 983 if t.Kind() != Func { 984 panic("reflect: NumIn of non-func type " + t.String()) 985 } 986 tt := (*funcType)(unsafe.Pointer(t)) 987 return int(tt.inCount) 988 } 989 990 func (t *rtype) NumOut() int { 991 if t.Kind() != Func { 992 panic("reflect: NumOut of non-func type " + t.String()) 993 } 994 tt := (*funcType)(unsafe.Pointer(t)) 995 return len(tt.out()) 996 } 997 998 func (t *rtype) Out(i int) Type { 999 if t.Kind() != Func { 1000 panic("reflect: 
Out of non-func type " + t.String()) 1001 } 1002 tt := (*funcType)(unsafe.Pointer(t)) 1003 return toType(tt.out()[i]) 1004 } 1005 1006 func (t *funcType) in() []*rtype { 1007 uadd := unsafe.Sizeof(*t) 1008 if t.tflag&tflagUncommon != 0 { 1009 uadd += unsafe.Sizeof(uncommonType{}) 1010 } 1011 if t.inCount == 0 { 1012 return nil 1013 } 1014 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount] 1015 } 1016 1017 func (t *funcType) out() []*rtype { 1018 uadd := unsafe.Sizeof(*t) 1019 if t.tflag&tflagUncommon != 0 { 1020 uadd += unsafe.Sizeof(uncommonType{}) 1021 } 1022 outCount := t.outCount & (1<<15 - 1) 1023 if outCount == 0 { 1024 return nil 1025 } 1026 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount] 1027 } 1028 1029 // add returns p+x. 1030 // 1031 // The whySafe string is ignored, so that the function still inlines 1032 // as efficiently as p+x, but all call sites should use the string to 1033 // record why the addition is safe, which is to say why the addition 1034 // does not cause x to advance to the very end of p's allocation 1035 // and therefore point incorrectly at the next block in memory. 1036 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { 1037 return unsafe.Pointer(uintptr(p) + x) 1038 } 1039 1040 func (d ChanDir) String() string { 1041 switch d { 1042 case SendDir: 1043 return "chan<-" 1044 case RecvDir: 1045 return "<-chan" 1046 case BothDir: 1047 return "chan" 1048 } 1049 return "ChanDir" + strconv.Itoa(int(d)) 1050 } 1051 1052 // Method returns the i'th method in the type's method set. 
func (t *interfaceType) Method(i int) (m Method) {
	// Out-of-range indexes yield the zero Method rather than panicking.
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	pname := t.nameOff(p.name)
	m.Name = pname.name()
	if !pname.isExported() {
		// Unexported method: record the package path that qualifies it.
		// An empty method-level path means the method was declared in
		// the interface type's own package.
		m.PkgPath = pname.pkgPath()
		if m.PkgPath == "" {
			m.PkgPath = t.pkgPath.name()
		}
	}
	m.Type = toType(t.typeOff(p.typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's
// method set and a boolean reporting whether it was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	// Linear scan; interface method tables are small.
	for i := range t.methods {
		p = &t.methods[i]
		if t.nameOff(p.name).name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	// Each iteration consumes one key:"value" pair from the front of tag.
	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		// A key must be non-empty and immediately followed by `:"`.
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++ // skip the escaped character
			}
			i++
		}
		if i >= len(tag) {
			// Unterminated quoted value: malformed tag.
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = p.name.name()
	f.Anonymous = p.embedded()
	if !p.name.isExported() {
		// Unexported fields are qualified by the struct's package path.
		f.PkgPath = t.pkgPath.name()
	}
	if tag := p.name.tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			// After the first hop, dereference a pointer to an
			// embedded struct before indexing into it.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for embedded field) type for field f.
				fname := f.name.name()
				var ntyp *rtype
				if f.embedded() {
					// Embedded field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without embedded fields.
	hasEmbeds := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if tf.name.name() == name {
				return t.Field(i), true
			}
			if tf.embedded() {
				hasEmbeds = true
			}
		}
	}
	if !hasEmbeds {
		return
	}
	// Fall back to the breadth-first search over embedded fields.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}

// ptrMap is the cache for PtrTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}

// ptrTo returns the type descriptor for *t, consulting (in order) the
// descriptor's own ptrToThis offset, the ptrMap cache, the types linked
// into the binary, and finally building a new ptrType at run time.
func (t *rtype) ptrTo() *rtype {
	if t.ptrToThis != 0 {
		return t.typeOff(t.ptrToThis)
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p := (*ptrType)(unsafe.Pointer(tt))
		if p.elem != t {
			continue
		}
		// LoadOrStore keeps the first stored value under concurrent calls.
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).rtype
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.str = resolveReflectName(newName(s, "", false))
	pp.ptrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.hash = fnv1(t.hash, '*')

	pp.elem = t

	pi, _ := ptrMap.LoadOrStore(t, &pp)
	return &pi.(*ptrType).rtype
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*rtype), t)
}

// AssignableTo reports whether a value of the type is assignable to type u.
// It panics if u is nil.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*rtype)
	return directlyAssignable(uu, t) || implements(uu, t)
}

// ConvertibleTo reports whether a value of the type is convertible to type u.
// It panics if u is nil.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*rtype)
	return convertOp(uu, t) != nil
}

// Comparable reports whether values of this type are comparable;
// the descriptor carries an equality function only for comparable types.
func (t *rtype) Comparable() bool {
	return t.equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			tmName := t.nameOff(tm.name)
			vm := &v.methods[j]
			vmName := V.nameOff(vm.name)
			if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
				if !tmName.isExported() {
					// Unexported methods match only within the same package;
					// an empty method-level path means the declaring package.
					tmPkgPath := tmName.pkgPath()
					if tmPkgPath == "" {
						tmPkgPath = t.pkgPath.name()
					}
					vmPkgPath := vmName.pkgPath()
					if vmPkgPath == "" {
						vmPkgPath = v.pkgPath.name()
					}
					if tmPkgPath != vmPkgPath {
						continue
					}
				}
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all, so it cannot satisfy a
		// non-empty interface.
		return false
	}
	i := 0
	vmethods := v.methods()
	for j := 0; j < int(v.mcount); j++ {
		tm := &t.methods[i]
		tmName := t.nameOff(tm.name)
		vm := vmethods[j]
		vmName := V.nameOff(vm.name)
		if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
			if !tmName.isExported() {
				// Same unexported-method package check as above.
				tmPkgPath := tmName.pkgPath()
				if tmPkgPath == "" {
					tmPkgPath = t.pkgPath.name()
				}
				vmPkgPath := vmName.pkgPath()
				if vmPkgPath == "" {
					vmPkgPath = V.nameOff(v.pkgPath).name()
				}
				if tmPkgPath != vmPkgPath {
					continue
				}
			}
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// specialChannelAssignability reports whether a value x of channel type V
// can be directly assigned (using memmove) to another channel type T.
// https://golang.org/doc/go_spec.html#Assignability
// T and V must be both of Chan kind.
func specialChannelAssignability(T, V *rtype) bool {
	// Special case:
	// x is a bidirectional channel value, T is a channel type,
	// x's type V and T have identical element types,
	// and at least one of V or T is not a defined type.
	return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must not be defined
	// and they must have the same kind.
	if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
		return false
	}

	if T.Kind() == Chan && specialChannelAssignability(T, V) {
		return true
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether types T and V are identical.
// With cmpTags set, identity means the same type descriptor; without it,
// names and struct tags are ignored and only the underlying structure
// is compared.
func haveIdenticalType(T, V Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if T.Name() != V.Name() || T.Kind() != V.Kind() {
		return false
	}

	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, recursing structurally through composite kinds.
// cmpTags is propagated to element comparisons and controls whether
// struct field tags must match.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// outCount also carries the variadic bit, so comparing it
		// compares variadicity too.
		if t.outCount != v.outCount || t.inCount != v.inCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		if t.pkgPath.name() != v.pkgPath.name() {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.name.tag() != vf.name.tag() {
				return false
			}
			// offsetEmbed combines field offset and the embedded flag.
			if tf.offsetEmbed != vf.offsetEmbed {
				return false
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff resolves a typelinks offset within a section to an *rtype.
func rtypeOff(section unsafe.Pointer, off int32) *rtype {
	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*rtype {
	sections, offset := typelinks()
	var ret []*rtype

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)/2 // avoid overflow when computing h
			// i ≤ h < j
			if !(rtypeOff(section, offs[h]).String() >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if typ.String() != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + typ.String()
	case RecvDir:
		s = "<-chan " + typ.String()
	case BothDir:
		typeStr := typ.String()
		if typeStr[0] == '<' {
			// typ is recv chan, need parentheses as "<-" associates with leftmost
			// chan possible, see:
			// * https://golang.org/ref/spec#Channel_types
			// * https://github.com/golang/go/issues/39897
			s = "chan (" + typeStr + ")"
		} else {
			s = "chan " + typeStr
		}
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.elem == typ && ch.dir == uintptr(dir) {
			// LoadOrStore keeps the first stored value under concurrent calls.
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a channel type.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = tflagRegularMemory
	ch.dir = uintptr(dir)
	ch.str = resolveReflectName(newName(s, "", false))
	ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.elem = typ

	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
	return ti.(Type)
}

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	// A nil equal function marks an incomparable (hence invalid key) type.
	if ktyp.equal == nil {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + ktyp.String() + "]" + etyp.String()
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.key == ktyp && mt.elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a map type.
	// Note: flag values must match those used in the TMAP case
	// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.str = resolveReflectName(newName(s, "", false))
	mt.tflag = 0
	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.key = ktyp
	mt.elem = etyp
	mt.bucket = bucketOf(ktyp, etyp)
	mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(ktyp, p, seed)
	}
	mt.flags = 0
	if ktyp.size > maxKeySize {
		// Oversized keys are stored indirectly as pointers.
		mt.keysize = uint8(ptrSize)
		mt.flags |= 1 // indirect key
	} else {
		mt.keysize = uint8(ktyp.size)
	}
	if etyp.size > maxValSize {
		// Oversized values are stored indirectly as pointers.
		mt.valuesize = uint8(ptrSize)
		mt.flags |= 2 // indirect value
	} else {
		mt.valuesize = uint8(etyp.size)
	}
	mt.bucketsize = uint16(mt.bucket.size)
	if isReflexive(ktyp) {
		mt.flags |= 4
	}
	if needKeyUpdate(ktyp) {
		mt.flags |= 8
	}
	if hashMightPanic(ktyp) {
		mt.flags |= 16
	}
	mt.ptrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
	return ti.(Type)
}

// TODO(crawshaw): as these funcTypeFixedN structs have no methods,
// they could be defined at runtime using the StructOf function.
type funcTypeFixed4 struct {
	funcType
	args [4]*rtype
}
type funcTypeFixed8 struct {
	funcType
	args [8]*rtype
}
type funcTypeFixed16 struct {
	funcType
	args [16]*rtype
}
type funcTypeFixed32 struct {
	funcType
	args [32]*rtype
}
type funcTypeFixed64 struct {
	funcType
	args [64]*rtype
}
type funcTypeFixed128 struct {
	funcType
	args [128]*rtype
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	// Choose the smallest funcTypeFixedN able to hold all argument and
	// result types, so they sit in an array directly after the funcType,
	// matching the trailing-array layout that in()/out() read.
	var ft *funcType
	var args []*rtype
	switch {
	case n <= 4:
		fixed := new(funcTypeFixed4)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 8:
		fixed := new(funcTypeFixed8)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 16:
		fixed := new(funcTypeFixed16)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 32:
		fixed := new(funcTypeFixed32)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 64:
		fixed := new(funcTypeFixed64)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 128:
		fixed := new(funcTypeFixed128)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	default:
		panic("reflect.FuncOf: too many arguments")
	}
	*ft = *prototype

	// Build a hash and minimally populate ft.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	// '.' separates the input types from the output types in the hash.
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if len(args) > 50 {
		panic("reflect.FuncOf does not support more than 50 arguments")
	}
	ft.tflag = 0
	ft.hash = hash
	ft.inCount = uint16(len(in))
	ft.outCount = uint16(len(out))
	if variadic {
		// The top bit of outCount records variadicity.
		ft.outCount |= 1 << 15
	}

	// Look in cache.
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// addToCache publishes tt under the computed hash; the lock is held,
	// so the load-append-store sequence cannot lose concurrent stores.
	addToCache := func(tt *rtype) Type {
		var rts []*rtype
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*rtype)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return tt
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
			return addToCache(tt)
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.str = resolveReflectName(newName(str, "", false))
	ft.ptrToThis = 0
	return addToCache(&ft.rtype)
}

// funcStr builds a string representation of a funcType.
2056 func funcStr(ft *funcType) string { 2057 repr := make([]byte, 0, 64) 2058 repr = append(repr, "func("...) 2059 for i, t := range ft.in() { 2060 if i > 0 { 2061 repr = append(repr, ", "...) 2062 } 2063 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2064 repr = append(repr, "..."...) 2065 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2066 } else { 2067 repr = append(repr, t.String()...) 2068 } 2069 } 2070 repr = append(repr, ')') 2071 out := ft.out() 2072 if len(out) == 1 { 2073 repr = append(repr, ' ') 2074 } else if len(out) > 1 { 2075 repr = append(repr, " ("...) 2076 } 2077 for i, t := range out { 2078 if i > 0 { 2079 repr = append(repr, ", "...) 2080 } 2081 repr = append(repr, t.String()...) 2082 } 2083 if len(out) > 1 { 2084 repr = append(repr, ')') 2085 } 2086 return string(repr) 2087 } 2088 2089 // isReflexive reports whether the == operation on the type is reflexive. 2090 // That is, x == x for all values x of type t. 2091 func isReflexive(t *rtype) bool { 2092 switch t.Kind() { 2093 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2094 return true 2095 case Float32, Float64, Complex64, Complex128, Interface: 2096 return false 2097 case Array: 2098 tt := (*arrayType)(unsafe.Pointer(t)) 2099 return isReflexive(tt.elem) 2100 case Struct: 2101 tt := (*structType)(unsafe.Pointer(t)) 2102 for _, f := range tt.fields { 2103 if !isReflexive(f.typ) { 2104 return false 2105 } 2106 } 2107 return true 2108 default: 2109 // Func, Map, Slice, Invalid 2110 panic("isReflexive called on non-key type " + t.String()) 2111 } 2112 } 2113 2114 // needKeyUpdate reports whether map overwrites require the key to be copied. 
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		// An array key needs updating iff its element type does.
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		// A struct key needs updating if any field does.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}

// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *rtype) bool {
	switch t.Kind() {
	case Interface:
		// Hashing an interface hashes its dynamic value, which panics
		// when the dynamic type is not comparable.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return hashMightPanic(tt.elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if hashMightPanic(f.typ) {
				return true
			}
		}
		return false
	default:
		return false
	}
}

// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8   // key/elem pairs per bucket
	maxKeySize uintptr = 128 // keys larger than this are stored indirectly (behind a pointer)
	maxValSize uintptr = 128 // elems larger than this are stored indirectly (behind a pointer)
)

// bucketOf constructs the runtime bucket type for a map with the given
// key and elem types, filling in only size, GC metadata, and a debug
// string. Oversized keys/elems are replaced by pointers to them.
func bucketOf(ktyp, etyp *rtype) *rtype {
	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr
	var overflowPad uintptr

	// Bucket layout: bucketSize leading bytes (one per entry), then
	// bucketSize keys, then bucketSize elems, then one overflow pointer.
	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
		panic("reflect: bad size computation in MapOf")
	}

	if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
		// Build a pointer bitmap for the whole bucket, replicating the
		// key and elem masks bucketSize times each.
		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
		mask := make([]byte, (nptr+7)/8)
		base := bucketSize / ptrSize // skip the leading per-entry bytes

		if ktyp.ptrdata != 0 {
			emitGCMask(mask, base, ktyp, bucketSize)
		}
		base += bucketSize * ktyp.size / ptrSize

		if etyp.ptrdata != 0 {
			emitGCMask(mask, base, etyp, bucketSize)
		}
		base += bucketSize * etyp.size / ptrSize
		base += overflowPad / ptrSize

		// Mark the trailing overflow pointer word.
		word := base
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:   ptrSize,
		size:    size,
		kind:    uint8(Struct),
		ptrdata: ptrdata,
		gcdata:  gcdata,
	}
	if overflowPad > 0 {
		b.align = 8
	}
	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
	b.str = resolveReflectName(newName(s, "", false))
	return b
}

// gcSlice returns the byte range [begin, end) of t's GC metadata.
// The capacity is clamped to end so appends cannot scribble past it.
func (t *rtype) gcSlice(begin, end uintptr) []byte {
	return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
}

// emitGCMask writes the GC mask for [n]typ into out, starting at bit
// offset base.
func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
	if typ.kind&kindGCProg != 0 {
		panic("reflect: unexpected GC program")
	}
	ptrs := typ.ptrdata / ptrSize // pointer words described by typ's mask
	words := typ.size / ptrSize   // stride between consecutive elements, in words
	mask := typ.gcSlice(0, (ptrs+7)/8)
	// For every pointer bit in one element, set the corresponding bit in
	// each of the n replicas.
	for j := uintptr(0); j < ptrs; j++ {
		if (mask[j/8]>>(j%8))&1 != 0 {
			for i := uintptr(0); i < n; i++ {
				k := base + i*words + j
				out[k/8] |= 1 << (k % 8)
			}
		}
	}
}

// appendGCProg appends the GC program for the first ptrdata bytes of
// typ to dst and returns the extended slice.
func appendGCProg(dst []byte, typ *rtype) []byte {
	if typ.kind&kindGCProg != 0 {
		// Element has GC program; emit one element.
		// The stored program is prefixed by a 4-byte length; drop the
		// prefix and the final terminator byte so the body can be
		// embedded in a larger program.
		n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
		prog := typ.gcSlice(4, 4+n-1)
		return append(dst, prog...)
	}

	// Element is small with pointer mask; use as literal bits.
	ptrs := typ.ptrdata / ptrSize
	mask := typ.gcSlice(0, (ptrs+7)/8)

	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
	for ; ptrs > 120; ptrs -= 120 {
		dst = append(dst, 120)
		dst = append(dst, mask[:15]...)
		mask = mask[15:]
	}

	dst = append(dst, byte(ptrs))
	dst = append(dst, mask...)
	return dst
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + typ.String()
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a slice type.
	// Start from the compiler-generated []unsafe.Pointer type as a
	// prototype and patch in the element-specific fields.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.tflag = 0
	slice.str = resolveReflectName(newName(s, "", false))
	slice.hash = fnv1(typ.hash, '[')
	slice.elem = typ
	slice.ptrToThis = 0

	// LoadOrStore resolves the race between concurrent constructors of
	// the same slice type: the first stored value wins.
	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
	return ti.(Type)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// structTypeUncommon is a structType with room for method metadata,
// used by StructOf when the result has no methods.
type structTypeUncommon struct {
	structType
	u uncommonType
}

// isLetter reports whether a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		if i == 0 && !isLetter(c) {
			return false
		}

		if !(isLetter(c) || unicode.IsDigit(c)) {
			return false
		}
	}

	// The empty string never enters the loop; reject it here.
	return len(fieldName) > 0
}

// StructOf returns the struct type containing fields.
// The Offset and Index fields are ignored and computed as they would be
// by the compiler.
2364 // 2365 // StructOf currently does not generate wrapper methods for embedded 2366 // fields and panics if passed unexported StructFields. 2367 // These limitations may be lifted in a future version. 2368 func StructOf(fields []StructField) Type { 2369 var ( 2370 hash = fnv1(0, []byte("struct {")...) 2371 size uintptr 2372 typalign uint8 2373 comparable = true 2374 methods []method 2375 2376 fs = make([]structField, len(fields)) 2377 repr = make([]byte, 0, 64) 2378 fset = map[string]struct{}{} // fields' names 2379 2380 hasGCProg = false // records whether a struct-field type has a GCProg 2381 ) 2382 2383 lastzero := uintptr(0) 2384 repr = append(repr, "struct {"...) 2385 pkgpath := "" 2386 for i, field := range fields { 2387 if field.Name == "" { 2388 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2389 } 2390 if !isValidFieldName(field.Name) { 2391 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2392 } 2393 if field.Type == nil { 2394 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2395 } 2396 f, fpkgpath := runtimeStructField(field) 2397 ft := f.typ 2398 if ft.kind&kindGCProg != 0 { 2399 hasGCProg = true 2400 } 2401 if fpkgpath != "" { 2402 if pkgpath == "" { 2403 pkgpath = fpkgpath 2404 } else if pkgpath != fpkgpath { 2405 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) 2406 } 2407 } 2408 2409 // Update string and hash 2410 name := f.name.name() 2411 hash = fnv1(hash, []byte(name)...) 2412 repr = append(repr, (" " + name)...) 
2413 if f.embedded() { 2414 // Embedded field 2415 if f.typ.Kind() == Ptr { 2416 // Embedded ** and *interface{} are illegal 2417 elem := ft.Elem() 2418 if k := elem.Kind(); k == Ptr || k == Interface { 2419 panic("reflect.StructOf: illegal embedded field type " + ft.String()) 2420 } 2421 } 2422 2423 switch f.typ.Kind() { 2424 case Interface: 2425 ift := (*interfaceType)(unsafe.Pointer(ft)) 2426 for im, m := range ift.methods { 2427 if ift.nameOff(m.name).pkgPath() != "" { 2428 // TODO(sbinet). Issue 15924. 2429 panic("reflect: embedded interface with unexported method(s) not implemented") 2430 } 2431 2432 var ( 2433 mtyp = ift.typeOff(m.typ) 2434 ifield = i 2435 imethod = im 2436 ifn Value 2437 tfn Value 2438 ) 2439 2440 if ft.kind&kindDirectIface != 0 { 2441 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2442 var args []Value 2443 var recv = in[0] 2444 if len(in) > 1 { 2445 args = in[1:] 2446 } 2447 return recv.Field(ifield).Method(imethod).Call(args) 2448 }) 2449 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2450 var args []Value 2451 var recv = in[0] 2452 if len(in) > 1 { 2453 args = in[1:] 2454 } 2455 return recv.Field(ifield).Method(imethod).Call(args) 2456 }) 2457 } else { 2458 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2459 var args []Value 2460 var recv = in[0] 2461 if len(in) > 1 { 2462 args = in[1:] 2463 } 2464 return recv.Field(ifield).Method(imethod).Call(args) 2465 }) 2466 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2467 var args []Value 2468 var recv = Indirect(in[0]) 2469 if len(in) > 1 { 2470 args = in[1:] 2471 } 2472 return recv.Field(ifield).Method(imethod).Call(args) 2473 }) 2474 } 2475 2476 methods = append(methods, method{ 2477 name: resolveReflectName(ift.nameOff(m.name)), 2478 mtyp: resolveReflectType(mtyp), 2479 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2480 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2481 }) 2482 } 2483 case Ptr: 2484 ptr := (*ptrType)(unsafe.Pointer(ft)) 2485 if unt := ptr.uncommon(); unt != nil { 2486 
if i > 0 && unt.mcount > 0 { 2487 // Issue 15924. 2488 panic("reflect: embedded type with methods not implemented if type is not first field") 2489 } 2490 if len(fields) > 1 { 2491 panic("reflect: embedded type with methods not implemented if there is more than one field") 2492 } 2493 for _, m := range unt.methods() { 2494 mname := ptr.nameOff(m.name) 2495 if mname.pkgPath() != "" { 2496 // TODO(sbinet). 2497 // Issue 15924. 2498 panic("reflect: embedded interface with unexported method(s) not implemented") 2499 } 2500 methods = append(methods, method{ 2501 name: resolveReflectName(mname), 2502 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2503 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2504 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2505 }) 2506 } 2507 } 2508 if unt := ptr.elem.uncommon(); unt != nil { 2509 for _, m := range unt.methods() { 2510 mname := ptr.nameOff(m.name) 2511 if mname.pkgPath() != "" { 2512 // TODO(sbinet) 2513 // Issue 15924. 2514 panic("reflect: embedded interface with unexported method(s) not implemented") 2515 } 2516 methods = append(methods, method{ 2517 name: resolveReflectName(mname), 2518 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2519 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2520 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2521 }) 2522 } 2523 } 2524 default: 2525 if unt := ft.uncommon(); unt != nil { 2526 if i > 0 && unt.mcount > 0 { 2527 // Issue 15924. 2528 panic("reflect: embedded type with methods not implemented if type is not first field") 2529 } 2530 if len(fields) > 1 && ft.kind&kindDirectIface != 0 { 2531 panic("reflect: embedded type with methods not implemented for non-pointer type") 2532 } 2533 for _, m := range unt.methods() { 2534 mname := ft.nameOff(m.name) 2535 if mname.pkgPath() != "" { 2536 // TODO(sbinet) 2537 // Issue 15924. 
2538 panic("reflect: embedded interface with unexported method(s) not implemented") 2539 } 2540 methods = append(methods, method{ 2541 name: resolveReflectName(mname), 2542 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2543 ifn: resolveReflectText(ft.textOff(m.ifn)), 2544 tfn: resolveReflectText(ft.textOff(m.tfn)), 2545 }) 2546 2547 } 2548 } 2549 } 2550 } 2551 if _, dup := fset[name]; dup { 2552 panic("reflect.StructOf: duplicate field " + name) 2553 } 2554 fset[name] = struct{}{} 2555 2556 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2557 2558 repr = append(repr, (" " + ft.String())...) 2559 if f.name.tagLen() > 0 { 2560 hash = fnv1(hash, []byte(f.name.tag())...) 2561 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2562 } 2563 if i < len(fields)-1 { 2564 repr = append(repr, ';') 2565 } 2566 2567 comparable = comparable && (ft.equal != nil) 2568 2569 offset := align(size, uintptr(ft.align)) 2570 if ft.align > typalign { 2571 typalign = ft.align 2572 } 2573 size = offset + ft.size 2574 f.offsetEmbed |= offset << 1 2575 2576 if ft.size == 0 { 2577 lastzero = size 2578 } 2579 2580 fs[i] = f 2581 } 2582 2583 if size > 0 && lastzero == size { 2584 // This is a non-zero sized struct that ends in a 2585 // zero-sized field. We add an extra byte of padding, 2586 // to ensure that taking the address of the final 2587 // zero-sized field can't manufacture a pointer to the 2588 // next object in the heap. See issue 9401. 2589 size++ 2590 } 2591 2592 var typ *structType 2593 var ut *uncommonType 2594 2595 if len(methods) == 0 { 2596 t := new(structTypeUncommon) 2597 typ = &t.structType 2598 ut = &t.u 2599 } else { 2600 // A *rtype representing a struct is followed directly in memory by an 2601 // array of method objects representing the methods attached to the 2602 // struct. To get the same layout for a run time generated type, we 2603 // need an array directly following the uncommonType memory. 
2604 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2605 tt := New(StructOf([]StructField{ 2606 {Name: "S", Type: TypeOf(structType{})}, 2607 {Name: "U", Type: TypeOf(uncommonType{})}, 2608 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, 2609 })) 2610 2611 typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr())) 2612 ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr())) 2613 2614 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods) 2615 } 2616 // TODO(sbinet): Once we allow embedding multiple types, 2617 // methods will need to be sorted like the compiler does. 2618 // TODO(sbinet): Once we allow non-exported methods, we will 2619 // need to compute xcount as the number of exported methods. 2620 ut.mcount = uint16(len(methods)) 2621 ut.xcount = ut.mcount 2622 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2623 2624 if len(fs) > 0 { 2625 repr = append(repr, ' ') 2626 } 2627 repr = append(repr, '}') 2628 hash = fnv1(hash, '}') 2629 str := string(repr) 2630 2631 // Round the size up to be a multiple of the alignment. 2632 size = align(size, uintptr(typalign)) 2633 2634 // Make the struct type. 2635 var istruct interface{} = struct{}{} 2636 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2637 *typ = *prototype 2638 typ.fields = fs 2639 if pkgpath != "" { 2640 typ.pkgPath = newName(pkgpath, "", false) 2641 } 2642 2643 // Look in cache. 2644 if ts, ok := structLookupCache.m.Load(hash); ok { 2645 for _, st := range ts.([]Type) { 2646 t := st.common() 2647 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2648 return t 2649 } 2650 } 2651 } 2652 2653 // Not in cache, lock and retry. 
2654 structLookupCache.Lock() 2655 defer structLookupCache.Unlock() 2656 if ts, ok := structLookupCache.m.Load(hash); ok { 2657 for _, st := range ts.([]Type) { 2658 t := st.common() 2659 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2660 return t 2661 } 2662 } 2663 } 2664 2665 addToCache := func(t Type) Type { 2666 var ts []Type 2667 if ti, ok := structLookupCache.m.Load(hash); ok { 2668 ts = ti.([]Type) 2669 } 2670 structLookupCache.m.Store(hash, append(ts, t)) 2671 return t 2672 } 2673 2674 // Look in known types. 2675 for _, t := range typesByString(str) { 2676 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2677 // even if 't' wasn't a structType with methods, we should be ok 2678 // as the 'u uncommonType' field won't be accessed except when 2679 // tflag&tflagUncommon is set. 2680 return addToCache(t) 2681 } 2682 } 2683 2684 typ.str = resolveReflectName(newName(str, "", false)) 2685 typ.tflag = 0 // TODO: set tflagRegularMemory 2686 typ.hash = hash 2687 typ.size = size 2688 typ.ptrdata = typeptrdata(typ.common()) 2689 typ.align = typalign 2690 typ.fieldAlign = typalign 2691 typ.ptrToThis = 0 2692 if len(methods) > 0 { 2693 typ.tflag |= tflagUncommon 2694 } 2695 2696 if hasGCProg { 2697 lastPtrField := 0 2698 for i, ft := range fs { 2699 if ft.typ.pointers() { 2700 lastPtrField = i 2701 } 2702 } 2703 prog := []byte{0, 0, 0, 0} // will be length of prog 2704 var off uintptr 2705 for i, ft := range fs { 2706 if i > lastPtrField { 2707 // gcprog should not include anything for any field after 2708 // the last field that contains pointer data 2709 break 2710 } 2711 if !ft.typ.pointers() { 2712 // Ignore pointerless fields. 2713 continue 2714 } 2715 // Pad to start of this field with zeros. 
2716 if ft.offset() > off { 2717 n := (ft.offset() - off) / ptrSize 2718 prog = append(prog, 0x01, 0x00) // emit a 0 bit 2719 if n > 1 { 2720 prog = append(prog, 0x81) // repeat previous bit 2721 prog = appendVarint(prog, n-1) // n-1 times 2722 } 2723 off = ft.offset() 2724 } 2725 2726 prog = appendGCProg(prog, ft.typ) 2727 off += ft.typ.ptrdata 2728 } 2729 prog = append(prog, 0) 2730 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2731 typ.kind |= kindGCProg 2732 typ.gcdata = &prog[0] 2733 } else { 2734 typ.kind &^= kindGCProg 2735 bv := new(bitVector) 2736 addTypeBits(bv, 0, typ.common()) 2737 if len(bv.data) > 0 { 2738 typ.gcdata = &bv.data[0] 2739 } 2740 } 2741 typ.equal = nil 2742 if comparable { 2743 typ.equal = func(p, q unsafe.Pointer) bool { 2744 for _, ft := range typ.fields { 2745 pi := add(p, ft.offset(), "&x.field safe") 2746 qi := add(q, ft.offset(), "&x.field safe") 2747 if !ft.typ.equal(pi, qi) { 2748 return false 2749 } 2750 } 2751 return true 2752 } 2753 } 2754 2755 switch { 2756 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2757 // structs of 1 direct iface type can be direct 2758 typ.kind |= kindDirectIface 2759 default: 2760 typ.kind &^= kindDirectIface 2761 } 2762 2763 return addToCache(&typ.rtype) 2764 } 2765 2766 // runtimeStructField takes a StructField value passed to StructOf and 2767 // returns both the corresponding internal representation, of type 2768 // structField, and the pkgpath value to use for this field. 2769 func runtimeStructField(field StructField) (structField, string) { 2770 if field.Anonymous && field.PkgPath != "" { 2771 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") 2772 } 2773 2774 exported := field.PkgPath == "" 2775 if exported { 2776 // Best-effort check for misuse. 2777 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. 
2778 c := field.Name[0] 2779 if 'a' <= c && c <= 'z' || c == '_' { 2780 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2781 } 2782 } 2783 2784 offsetEmbed := uintptr(0) 2785 if field.Anonymous { 2786 offsetEmbed |= 1 2787 } 2788 2789 resolveReflectType(field.Type.common()) // install in runtime 2790 f := structField{ 2791 name: newName(field.Name, string(field.Tag), exported), 2792 typ: field.Type.common(), 2793 offsetEmbed: offsetEmbed, 2794 } 2795 return f, field.PkgPath 2796 } 2797 2798 // typeptrdata returns the length in bytes of the prefix of t 2799 // containing pointer data. Anything after this offset is scalar data. 2800 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2801 func typeptrdata(t *rtype) uintptr { 2802 switch t.Kind() { 2803 case Struct: 2804 st := (*structType)(unsafe.Pointer(t)) 2805 // find the last field that has pointers. 2806 field := -1 2807 for i := range st.fields { 2808 ft := st.fields[i].typ 2809 if ft.pointers() { 2810 field = i 2811 } 2812 } 2813 if field == -1 { 2814 return 0 2815 } 2816 f := st.fields[field] 2817 return f.offset() + f.typ.ptrdata 2818 2819 default: 2820 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2821 } 2822 } 2823 2824 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2825 const maxPtrmaskBytes = 2048 2826 2827 // ArrayOf returns the array type with the given count and element type. 2828 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2829 // 2830 // If the resulting type would be larger than the available address space, 2831 // ArrayOf panics. 2832 func ArrayOf(count int, elem Type) Type { 2833 typ := elem.(*rtype) 2834 2835 // Look in cache. 2836 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2837 if array, ok := lookupCache.Load(ckey); ok { 2838 return array.(Type) 2839 } 2840 2841 // Look in known types. 
2842 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2843 for _, tt := range typesByString(s) { 2844 array := (*arrayType)(unsafe.Pointer(tt)) 2845 if array.elem == typ { 2846 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2847 return ti.(Type) 2848 } 2849 } 2850 2851 // Make an array type. 2852 var iarray interface{} = [1]unsafe.Pointer{} 2853 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2854 array := *prototype 2855 array.tflag = typ.tflag & tflagRegularMemory 2856 array.str = resolveReflectName(newName(s, "", false)) 2857 array.hash = fnv1(typ.hash, '[') 2858 for n := uint32(count); n > 0; n >>= 8 { 2859 array.hash = fnv1(array.hash, byte(n)) 2860 } 2861 array.hash = fnv1(array.hash, ']') 2862 array.elem = typ 2863 array.ptrToThis = 0 2864 if typ.size > 0 { 2865 max := ^uintptr(0) / typ.size 2866 if uintptr(count) > max { 2867 panic("reflect.ArrayOf: array size would exceed virtual address space") 2868 } 2869 } 2870 array.size = typ.size * uintptr(count) 2871 if count > 0 && typ.ptrdata != 0 { 2872 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2873 } 2874 array.align = typ.align 2875 array.fieldAlign = typ.fieldAlign 2876 array.len = uintptr(count) 2877 array.slice = SliceOf(elem).(*rtype) 2878 2879 switch { 2880 case typ.ptrdata == 0 || array.size == 0: 2881 // No pointers. 2882 array.gcdata = nil 2883 array.ptrdata = 0 2884 2885 case count == 1: 2886 // In memory, 1-element array looks just like the element. 2887 array.kind |= typ.kind & kindGCProg 2888 array.gcdata = typ.gcdata 2889 array.ptrdata = typ.ptrdata 2890 2891 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2892 // Element is small with pointer mask; array is still small. 2893 // Create direct pointer mask by turning each 1 bit in elem 2894 // into count 1 bits in larger mask. 
2895 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2896 emitGCMask(mask, 0, typ, array.len) 2897 array.gcdata = &mask[0] 2898 2899 default: 2900 // Create program that emits one element 2901 // and then repeats to make the array. 2902 prog := []byte{0, 0, 0, 0} // will be length of prog 2903 prog = appendGCProg(prog, typ) 2904 // Pad from ptrdata to size. 2905 elemPtrs := typ.ptrdata / ptrSize 2906 elemWords := typ.size / ptrSize 2907 if elemPtrs < elemWords { 2908 // Emit literal 0 bit, then repeat as needed. 2909 prog = append(prog, 0x01, 0x00) 2910 if elemPtrs+1 < elemWords { 2911 prog = append(prog, 0x81) 2912 prog = appendVarint(prog, elemWords-elemPtrs-1) 2913 } 2914 } 2915 // Repeat count-1 times. 2916 if elemWords < 0x80 { 2917 prog = append(prog, byte(elemWords|0x80)) 2918 } else { 2919 prog = append(prog, 0x80) 2920 prog = appendVarint(prog, elemWords) 2921 } 2922 prog = appendVarint(prog, uintptr(count)-1) 2923 prog = append(prog, 0) 2924 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2925 array.kind |= kindGCProg 2926 array.gcdata = &prog[0] 2927 array.ptrdata = array.size // overestimate but ok; must match program 2928 } 2929 2930 etyp := typ.common() 2931 esize := etyp.Size() 2932 2933 array.equal = nil 2934 if eequal := etyp.equal; eequal != nil { 2935 array.equal = func(p, q unsafe.Pointer) bool { 2936 for i := 0; i < count; i++ { 2937 pi := arrayAt(p, i, esize, "i < count") 2938 qi := arrayAt(q, i, esize, "i < count") 2939 if !eequal(pi, qi) { 2940 return false 2941 } 2942 2943 } 2944 return true 2945 } 2946 } 2947 2948 switch { 2949 case count == 1 && !ifaceIndir(typ): 2950 // array of 1 direct iface type can be direct 2951 array.kind |= kindDirectIface 2952 default: 2953 array.kind &^= kindDirectIface 2954 } 2955 2956 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype) 2957 return ti.(Type) 2958 } 2959 2960 func appendVarint(x []byte, v uintptr) []byte { 2961 for ; v >= 0x80; v >>= 7 { 2962 x = append(x, byte(v|0x80)) 2963 } 
2964 x = append(x, byte(v)) 2965 return x 2966 } 2967 2968 // toType converts from a *rtype to a Type that can be returned 2969 // to the client of package reflect. In gc, the only concern is that 2970 // a nil *rtype must be replaced by a nil Type, but in gccgo this 2971 // function takes care of ensuring that multiple *rtype for the same 2972 // type are coalesced into a single Type. 2973 func toType(t *rtype) Type { 2974 if t == nil { 2975 return nil 2976 } 2977 return t 2978 } 2979 2980 type layoutKey struct { 2981 ftyp *funcType // function signature 2982 rcvr *rtype // receiver type, or nil if none 2983 } 2984 2985 type layoutType struct { 2986 t *rtype 2987 argSize uintptr // size of arguments 2988 retOffset uintptr // offset of return values. 2989 stack *bitVector 2990 framePool *sync.Pool 2991 } 2992 2993 var layoutCache sync.Map // map[layoutKey]layoutType 2994 2995 // funcLayout computes a struct type representing the layout of the 2996 // function arguments and return values for the function type t. 2997 // If rcvr != nil, rcvr specifies the type of the receiver. 2998 // The returned type exists only for GC, so we only fill out GC relevant info. 2999 // Currently, that's just size and the GC program. We also fill in 3000 // the name for possible debugging use. 
func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
	if t.Kind() != Func {
		panic("reflect: funcLayout of non-func type " + t.String())
	}
	if rcvr != nil && rcvr.Kind() == Interface {
		panic("reflect: funcLayout with interface receiver " + rcvr.String())
	}
	k := layoutKey{t, rcvr}
	if lti, ok := layoutCache.Load(k); ok {
		lt := lti.(layoutType)
		return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
	}

	// compute gc program & stack bitmap for arguments
	ptrmap := new(bitVector)
	var offset uintptr
	if rcvr != nil {
		// Reflect uses the "interface" calling convention for
		// methods, where receivers take one word of argument
		// space no matter how big they actually are.
		if ifaceIndir(rcvr) || rcvr.pointers() {
			ptrmap.append(1)
		} else {
			ptrmap.append(0)
		}
		offset += ptrSize
	}
	// Lay out each argument at its natural alignment, recording
	// pointer bits as we go.
	for _, arg := range t.in() {
		offset += -offset & uintptr(arg.align-1) // align up
		addTypeBits(ptrmap, offset, arg)
		offset += arg.size
	}
	argSize = offset
	// Results start at the next pointer-aligned offset.
	offset += -offset & (ptrSize - 1)
	retOffset = offset
	for _, res := range t.out() {
		offset += -offset & uintptr(res.align-1)
		addTypeBits(ptrmap, offset, res)
		offset += res.size
	}
	offset += -offset & (ptrSize - 1)

	// build dummy rtype holding gc program
	x := &rtype{
		align:   ptrSize,
		size:    offset,
		ptrdata: uintptr(ptrmap.n) * ptrSize,
	}
	if ptrmap.n > 0 {
		x.gcdata = &ptrmap.data[0]
	}

	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() interface{} {
		return unsafe_New(x)
	}}
	// LoadOrStore lets the first concurrent computation win; everyone
	// returns the same cached layout.
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		argSize:   argSize,
		retOffset: retOffset,
		stack:     ptrmap,
		framePool: framePool,
	})
	lt := lti.(layoutType)
	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	// Grow the backing store one byte at a time as bits cross a
	// byte boundary.
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}

// addTypeBits appends the pointer bitmap for a value of type t placed
// at byte offset 'offset' to bv, padding with 0 bits as needed.
// Types with no pointers contribute nothing.
func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
	if t.ptrdata == 0 {
		return
	}

	switch Kind(t.kind & kindMask) {
	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.fields {
			f := &tt.fields[i]
			addTypeBits(bv, offset+f.offset(), f.typ)
		}
	}
}