// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"runtime"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of exported methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	common() *rtype
	uncommon() *uncommonType
}

// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.

/*
 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../cmd/link/internal/ld/decodesym.go.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)

// tflag is used by an rtype to signal what extra type information is
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	runtime/type.go
type tflag uint8

const (
	// tflagUncommon means that there is a pointer, *uncommonType,
	// just beyond the outer type structure.
	//
	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
	// then t has uncommonType data and it can be accessed as:
	//
	//	type tUncommon struct {
	//		structType
	//		u uncommonType
	//	}
	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
	tflagUncommon tflag = 1 << 0

	// tflagExtraStar means the name in the str field has an
	// extraneous '*' prefix. This is because for most types T in
	// a program, the type *T also exists and reusing the str data
	// saves binary size.
	tflagExtraStar tflag = 1 << 1

	// tflagNamed means the type has a name.
	tflagNamed tflag = 1 << 2
)

// rtype is the common implementation of most values.
// It is embedded in other struct types.
//
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
	size       uintptr
	ptrdata    uintptr  // number of bytes in the type that can contain pointers
	hash       uint32   // hash of type; avoids computation in hash tables
	tflag      tflag    // extra type information flags
	align      uint8    // alignment of variable with this type
	fieldAlign uint8    // alignment of struct field with this type
	kind       uint8    // enumeration for C
	alg        *typeAlg // algorithm table
	gcdata     *byte    // garbage collection data
	str        nameOff  // string form
	ptrToThis  typeOff  // type for pointer to this type, may be zero
}

// a copy of runtime.typeAlg
type typeAlg struct {
	// function for hashing objects of this type
	// (ptr to object, seed) -> hash
	hash func(unsafe.Pointer, uintptr) uintptr
	// function for comparing objects of this type
	// (ptr to object A, ptr to object B) -> ==?
	equal func(unsafe.Pointer, unsafe.Pointer) bool
}

// Method on non-interface type
type method struct {
	name nameOff // name of method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call (one-word receiver)
	tfn  textOff // fn used for normal method call
}

// uncommonType is present only for defined types or types with methods
// (if T is a defined type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe a non-defined type with no methods.
type uncommonType struct {
	pkgPath nameOff // import path; empty for built-in types like int, string
	mcount  uint16  // number of methods
	xcount  uint16  // number of exported methods
	moff    uint32  // offset from this uncommontype to [mcount]method
	_       uint32  // unused
}

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)

// arrayType represents a fixed array type.
type arrayType struct {
	rtype
	elem  *rtype // array element type
	slice *rtype // slice type
	len   uintptr
}

// chanType represents a channel type.
type chanType struct {
	rtype
	elem *rtype  // channel element type
	dir  uintptr // channel direction (ChanDir)
}

// funcType represents a function type.
//
// A *rtype for each in and out parameter is stored in an array that
// directly follows the funcType (and possibly its uncommonType). So
// a function type with one method, one input, and one output is:
//
//	struct {
//		funcType
//		uncommonType
//		[2]*rtype    // [0] is in, [1] is out
//	}
type funcType struct {
	rtype
	inCount  uint16
	outCount uint16 // top bit is set if last input parameter is ...
}

// imethod represents a method on an interface type
type imethod struct {
	name nameOff // name of method
	typ  typeOff // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	rtype
	pkgPath name      // import path
	methods []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	rtype
	key           *rtype // map key type
	elem          *rtype // map element (value) type
	bucket        *rtype // internal bucket structure
	keysize       uint8  // size of key slot
	indirectkey   uint8  // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue uint8  // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}

// ptrType represents a pointer type.
type ptrType struct {
	rtype
	elem *rtype // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	rtype
	elem *rtype // slice element type
}

// Struct field
type structField struct {
	name        name    // name is always non-empty
	typ         *rtype  // type of field
	offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
}

// offset returns the field's byte offset within the struct
// (the low bit of offsetEmbed is the embedded flag, so shift it off).
func (f *structField) offset() uintptr {
	return f.offsetEmbed >> 1
}

// embedded reports whether the field is an embedded (anonymous) field,
// encoded in the low bit of offsetEmbed.
func (f *structField) embedded() bool {
	return f.offsetEmbed&1 != 0
}

// structType represents a struct type.
type structType struct {
	rtype
	pkgPath name
	fields  []structField // sorted by offset
}

// name is an encoded type name with optional extra data.
//
// The first byte is a bit field containing:
//
//	1<<0 the name is exported
//	1<<1 tag data follows the name
//	1<<2 pkgPath nameOff follows the name and tag
//
// The next two bytes are the data length:
//
//	 l := uint16(data[1])<<8 | uint16(data[2])
//
// Bytes [3:3+l] are the string data.
//
// If tag data follows then bytes 3+l and 3+l+1 are the tag length,
// with the data following.
//
// If the import path follows, then 4 bytes at the end of
// the data form a nameOff. The import path is only set for concrete
// methods that are defined in a different package than their type.
//
// If a name starts with "*", then the exported bit represents
// whether the pointed to type is exported.
type name struct {
	bytes *byte
}

// data returns a pointer to the byte at offset off within the encoded
// name. whySafe documents why the pointer arithmetic cannot run past
// the allocation (see add).
func (n name) data(off int, whySafe string) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
}

// isExported reports whether bit 1<<0 of the flag byte is set.
func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

// nameLen decodes the big-endian 16-bit name length stored in bytes 1-2.
func (n name) nameLen() int {
	return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
}

// tagLen decodes the big-endian 16-bit tag length that follows the name
// data, or returns 0 if the flag byte says no tag data is present.
func (n name) tagLen() int {
	if *n.data(0, "name flag field")&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
}

// name returns the string data of the encoded name, or "" for a nil name.
func (n name) name() (s string) {
	if n.bytes == nil {
		return
	}
	b := (*[4]byte)(unsafe.Pointer(n.bytes))

	hdr := (*stringHeader)(unsafe.Pointer(&s))
	hdr.Data = unsafe.Pointer(&b[3])
	hdr.Len = int(b[1])<<8 | int(b[2])
	return s
}

// tag returns the tag string that follows the name data, or "" if there
// is no tag.
func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringHeader)(unsafe.Pointer(&s))
	hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
	hdr.Len = tl
	return s
}

func (n name) pkgPath() string { 515 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 { 516 return "" 517 } 518 off := 3 + n.nameLen() 519 if tl := n.tagLen(); tl > 0 { 520 off += 2 + tl 521 } 522 var nameOff int32 523 // Note that this field may not be aligned in memory, 524 // so we cannot use a direct int32 assignment here. 525 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:]) 526 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 527 return pkgPathName.name() 528 } 529 530 // round n up to a multiple of a. a must be a power of 2. 531 func round(n, a uintptr) uintptr { 532 return (n + a - 1) &^ (a - 1) 533 } 534 535 func newName(n, tag string, exported bool) name { 536 if len(n) > 1<<16-1 { 537 panic("reflect.nameFrom: name too long: " + n) 538 } 539 if len(tag) > 1<<16-1 { 540 panic("reflect.nameFrom: tag too long: " + tag) 541 } 542 543 var bits byte 544 l := 1 + 2 + len(n) 545 if exported { 546 bits |= 1 << 0 547 } 548 if len(tag) > 0 { 549 l += 2 + len(tag) 550 bits |= 1 << 1 551 } 552 553 b := make([]byte, l) 554 b[0] = bits 555 b[1] = uint8(len(n) >> 8) 556 b[2] = uint8(len(n)) 557 copy(b[3:], n) 558 if len(tag) > 0 { 559 tb := b[3+len(n):] 560 tb[0] = uint8(len(tag) >> 8) 561 tb[1] = uint8(len(tag)) 562 copy(tb[2:], tag) 563 } 564 565 return name{bytes: &b[0]} 566 } 567 568 /* 569 * The compiler knows the exact layout of all the data structures above. 570 * The compiler does not know about the data structures and methods below. 571 */ 572 573 // Method represents a single method. 574 type Method struct { 575 // Name is the method name. 576 // PkgPath is the package path that qualifies a lower case (unexported) 577 // method name. It is empty for upper case (exported) method names. 578 // The combination of PkgPath and Name uniquely identifies a method 579 // in a method set. 
580 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 581 Name string 582 PkgPath string 583 584 Type Type // method type 585 Func Value // func with receiver as first argument 586 Index int // index for Type.Method 587 } 588 589 const ( 590 kindDirectIface = 1 << 5 591 kindGCProg = 1 << 6 // Type.gc points to GC program 592 kindNoPointers = 1 << 7 593 kindMask = (1 << 5) - 1 594 ) 595 596 func (k Kind) String() string { 597 if int(k) < len(kindNames) { 598 return kindNames[k] 599 } 600 return "kind" + strconv.Itoa(int(k)) 601 } 602 603 var kindNames = []string{ 604 Invalid: "invalid", 605 Bool: "bool", 606 Int: "int", 607 Int8: "int8", 608 Int16: "int16", 609 Int32: "int32", 610 Int64: "int64", 611 Uint: "uint", 612 Uint8: "uint8", 613 Uint16: "uint16", 614 Uint32: "uint32", 615 Uint64: "uint64", 616 Uintptr: "uintptr", 617 Float32: "float32", 618 Float64: "float64", 619 Complex64: "complex64", 620 Complex128: "complex128", 621 Array: "array", 622 Chan: "chan", 623 Func: "func", 624 Interface: "interface", 625 Map: "map", 626 Ptr: "ptr", 627 Slice: "slice", 628 String: "string", 629 Struct: "struct", 630 UnsafePointer: "unsafe.Pointer", 631 } 632 633 func (t *uncommonType) methods() []method { 634 if t.mcount == 0 { 635 return nil 636 } 637 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount] 638 } 639 640 func (t *uncommonType) exportedMethods() []method { 641 if t.xcount == 0 { 642 return nil 643 } 644 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount] 645 } 646 647 // resolveNameOff resolves a name offset from a base pointer. 648 // The (*rtype).nameOff method is a convenience wrapper for this function. 649 // Implemented in the runtime package. 650 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 651 652 // resolveTypeOff resolves an *rtype offset from a base type. 
653 // The (*rtype).typeOff method is a convenience wrapper for this function. 654 // Implemented in the runtime package. 655 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 656 657 // resolveTextOff resolves an function pointer offset from a base type. 658 // The (*rtype).textOff method is a convenience wrapper for this function. 659 // Implemented in the runtime package. 660 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 661 662 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 663 // It returns a new ID that can be used as a typeOff or textOff, and will 664 // be resolved correctly. Implemented in the runtime package. 665 func addReflectOff(ptr unsafe.Pointer) int32 666 667 // resolveReflectType adds a name to the reflection lookup map in the runtime. 668 // It returns a new nameOff that can be used to refer to the pointer. 669 func resolveReflectName(n name) nameOff { 670 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 671 } 672 673 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 674 // It returns a new typeOff that can be used to refer to the pointer. 675 func resolveReflectType(t *rtype) typeOff { 676 return typeOff(addReflectOff(unsafe.Pointer(t))) 677 } 678 679 // resolveReflectText adds a function pointer to the reflection lookup map in 680 // the runtime. It returns a new textOff that can be used to refer to the 681 // pointer. 
682 func resolveReflectText(ptr unsafe.Pointer) textOff { 683 return textOff(addReflectOff(ptr)) 684 } 685 686 type nameOff int32 // offset to a name 687 type typeOff int32 // offset to an *rtype 688 type textOff int32 // offset from top of text section 689 690 func (t *rtype) nameOff(off nameOff) name { 691 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 692 } 693 694 func (t *rtype) typeOff(off typeOff) *rtype { 695 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 696 } 697 698 func (t *rtype) textOff(off textOff) unsafe.Pointer { 699 return resolveTextOff(unsafe.Pointer(t), int32(off)) 700 } 701 702 func (t *rtype) uncommon() *uncommonType { 703 if t.tflag&tflagUncommon == 0 { 704 return nil 705 } 706 switch t.Kind() { 707 case Struct: 708 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 709 case Ptr: 710 type u struct { 711 ptrType 712 u uncommonType 713 } 714 return &(*u)(unsafe.Pointer(t)).u 715 case Func: 716 type u struct { 717 funcType 718 u uncommonType 719 } 720 return &(*u)(unsafe.Pointer(t)).u 721 case Slice: 722 type u struct { 723 sliceType 724 u uncommonType 725 } 726 return &(*u)(unsafe.Pointer(t)).u 727 case Array: 728 type u struct { 729 arrayType 730 u uncommonType 731 } 732 return &(*u)(unsafe.Pointer(t)).u 733 case Chan: 734 type u struct { 735 chanType 736 u uncommonType 737 } 738 return &(*u)(unsafe.Pointer(t)).u 739 case Map: 740 type u struct { 741 mapType 742 u uncommonType 743 } 744 return &(*u)(unsafe.Pointer(t)).u 745 case Interface: 746 type u struct { 747 interfaceType 748 u uncommonType 749 } 750 return &(*u)(unsafe.Pointer(t)).u 751 default: 752 type u struct { 753 rtype 754 u uncommonType 755 } 756 return &(*u)(unsafe.Pointer(t)).u 757 } 758 } 759 760 func (t *rtype) String() string { 761 s := t.nameOff(t.str).name() 762 if t.tflag&tflagExtraStar != 0 { 763 return s[1:] 764 } 765 return s 766 } 767 768 func (t *rtype) Size() uintptr { return t.size } 769 770 func (t *rtype) Bits() int { 771 if t 
== nil { 772 panic("reflect: Bits of nil Type") 773 } 774 k := t.Kind() 775 if k < Int || k > Complex128 { 776 panic("reflect: Bits of non-arithmetic Type " + t.String()) 777 } 778 return int(t.size) * 8 779 } 780 781 func (t *rtype) Align() int { return int(t.align) } 782 783 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 784 785 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 786 787 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 } 788 789 func (t *rtype) common() *rtype { return t } 790 791 func (t *rtype) exportedMethods() []method { 792 ut := t.uncommon() 793 if ut == nil { 794 return nil 795 } 796 return ut.exportedMethods() 797 } 798 799 func (t *rtype) NumMethod() int { 800 if t.Kind() == Interface { 801 tt := (*interfaceType)(unsafe.Pointer(t)) 802 return tt.NumMethod() 803 } 804 return len(t.exportedMethods()) 805 } 806 807 func (t *rtype) Method(i int) (m Method) { 808 if t.Kind() == Interface { 809 tt := (*interfaceType)(unsafe.Pointer(t)) 810 return tt.Method(i) 811 } 812 methods := t.exportedMethods() 813 if i < 0 || i >= len(methods) { 814 panic("reflect: Method index out of range") 815 } 816 p := methods[i] 817 pname := t.nameOff(p.name) 818 m.Name = pname.name() 819 fl := flag(Func) 820 mtyp := t.typeOff(p.mtyp) 821 ft := (*funcType)(unsafe.Pointer(mtyp)) 822 in := make([]Type, 0, 1+len(ft.in())) 823 in = append(in, t) 824 for _, arg := range ft.in() { 825 in = append(in, arg) 826 } 827 out := make([]Type, 0, len(ft.out())) 828 for _, ret := range ft.out() { 829 out = append(out, ret) 830 } 831 mt := FuncOf(in, out, ft.IsVariadic()) 832 m.Type = mt 833 tfn := t.textOff(p.tfn) 834 fn := unsafe.Pointer(&tfn) 835 m.Func = Value{mt.(*rtype), fn, fl} 836 837 m.Index = i 838 return m 839 } 840 841 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 842 if t.Kind() == Interface { 843 tt := (*interfaceType)(unsafe.Pointer(t)) 844 return tt.MethodByName(name) 845 } 846 ut := t.uncommon() 847 if 
ut == nil { 848 return Method{}, false 849 } 850 // TODO(mdempsky): Binary search. 851 for i, p := range ut.exportedMethods() { 852 if t.nameOff(p.name).name() == name { 853 return t.Method(i), true 854 } 855 } 856 return Method{}, false 857 } 858 859 func (t *rtype) PkgPath() string { 860 if t.tflag&tflagNamed == 0 { 861 return "" 862 } 863 ut := t.uncommon() 864 if ut == nil { 865 return "" 866 } 867 return t.nameOff(ut.pkgPath).name() 868 } 869 870 func hasPrefix(s, prefix string) bool { 871 return len(s) >= len(prefix) && s[:len(prefix)] == prefix 872 } 873 874 func (t *rtype) Name() string { 875 if t.tflag&tflagNamed == 0 { 876 return "" 877 } 878 s := t.String() 879 i := len(s) - 1 880 for i >= 0 { 881 if s[i] == '.' { 882 break 883 } 884 i-- 885 } 886 return s[i+1:] 887 } 888 889 func (t *rtype) ChanDir() ChanDir { 890 if t.Kind() != Chan { 891 panic("reflect: ChanDir of non-chan type") 892 } 893 tt := (*chanType)(unsafe.Pointer(t)) 894 return ChanDir(tt.dir) 895 } 896 897 func (t *rtype) IsVariadic() bool { 898 if t.Kind() != Func { 899 panic("reflect: IsVariadic of non-func type") 900 } 901 tt := (*funcType)(unsafe.Pointer(t)) 902 return tt.outCount&(1<<15) != 0 903 } 904 905 func (t *rtype) Elem() Type { 906 switch t.Kind() { 907 case Array: 908 tt := (*arrayType)(unsafe.Pointer(t)) 909 return toType(tt.elem) 910 case Chan: 911 tt := (*chanType)(unsafe.Pointer(t)) 912 return toType(tt.elem) 913 case Map: 914 tt := (*mapType)(unsafe.Pointer(t)) 915 return toType(tt.elem) 916 case Ptr: 917 tt := (*ptrType)(unsafe.Pointer(t)) 918 return toType(tt.elem) 919 case Slice: 920 tt := (*sliceType)(unsafe.Pointer(t)) 921 return toType(tt.elem) 922 } 923 panic("reflect: Elem of invalid type") 924 } 925 926 func (t *rtype) Field(i int) StructField { 927 if t.Kind() != Struct { 928 panic("reflect: Field of non-struct type") 929 } 930 tt := (*structType)(unsafe.Pointer(t)) 931 return tt.Field(i) 932 } 933 934 func (t *rtype) FieldByIndex(index []int) StructField { 935 
if t.Kind() != Struct { 936 panic("reflect: FieldByIndex of non-struct type") 937 } 938 tt := (*structType)(unsafe.Pointer(t)) 939 return tt.FieldByIndex(index) 940 } 941 942 func (t *rtype) FieldByName(name string) (StructField, bool) { 943 if t.Kind() != Struct { 944 panic("reflect: FieldByName of non-struct type") 945 } 946 tt := (*structType)(unsafe.Pointer(t)) 947 return tt.FieldByName(name) 948 } 949 950 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 951 if t.Kind() != Struct { 952 panic("reflect: FieldByNameFunc of non-struct type") 953 } 954 tt := (*structType)(unsafe.Pointer(t)) 955 return tt.FieldByNameFunc(match) 956 } 957 958 func (t *rtype) In(i int) Type { 959 if t.Kind() != Func { 960 panic("reflect: In of non-func type") 961 } 962 tt := (*funcType)(unsafe.Pointer(t)) 963 return toType(tt.in()[i]) 964 } 965 966 func (t *rtype) Key() Type { 967 if t.Kind() != Map { 968 panic("reflect: Key of non-map type") 969 } 970 tt := (*mapType)(unsafe.Pointer(t)) 971 return toType(tt.key) 972 } 973 974 func (t *rtype) Len() int { 975 if t.Kind() != Array { 976 panic("reflect: Len of non-array type") 977 } 978 tt := (*arrayType)(unsafe.Pointer(t)) 979 return int(tt.len) 980 } 981 982 func (t *rtype) NumField() int { 983 if t.Kind() != Struct { 984 panic("reflect: NumField of non-struct type") 985 } 986 tt := (*structType)(unsafe.Pointer(t)) 987 return len(tt.fields) 988 } 989 990 func (t *rtype) NumIn() int { 991 if t.Kind() != Func { 992 panic("reflect: NumIn of non-func type") 993 } 994 tt := (*funcType)(unsafe.Pointer(t)) 995 return int(tt.inCount) 996 } 997 998 func (t *rtype) NumOut() int { 999 if t.Kind() != Func { 1000 panic("reflect: NumOut of non-func type") 1001 } 1002 tt := (*funcType)(unsafe.Pointer(t)) 1003 return len(tt.out()) 1004 } 1005 1006 func (t *rtype) Out(i int) Type { 1007 if t.Kind() != Func { 1008 panic("reflect: Out of non-func type") 1009 } 1010 tt := (*funcType)(unsafe.Pointer(t)) 1011 return 
toType(tt.out()[i]) 1012 } 1013 1014 func (t *funcType) in() []*rtype { 1015 uadd := unsafe.Sizeof(*t) 1016 if t.tflag&tflagUncommon != 0 { 1017 uadd += unsafe.Sizeof(uncommonType{}) 1018 } 1019 if t.inCount == 0 { 1020 return nil 1021 } 1022 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount] 1023 } 1024 1025 func (t *funcType) out() []*rtype { 1026 uadd := unsafe.Sizeof(*t) 1027 if t.tflag&tflagUncommon != 0 { 1028 uadd += unsafe.Sizeof(uncommonType{}) 1029 } 1030 outCount := t.outCount & (1<<15 - 1) 1031 if outCount == 0 { 1032 return nil 1033 } 1034 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount] 1035 } 1036 1037 // add returns p+x. 1038 // 1039 // The whySafe string is ignored, so that the function still inlines 1040 // as efficiently as p+x, but all call sites should use the string to 1041 // record why the addition is safe, which is to say why the addition 1042 // does not cause x to advance to the very end of p's allocation 1043 // and therefore point incorrectly at the next block in memory. 1044 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { 1045 return unsafe.Pointer(uintptr(p) + x) 1046 } 1047 1048 func (d ChanDir) String() string { 1049 switch d { 1050 case SendDir: 1051 return "chan<-" 1052 case RecvDir: 1053 return "<-chan" 1054 case BothDir: 1055 return "chan" 1056 } 1057 return "ChanDir" + strconv.Itoa(int(d)) 1058 } 1059 1060 // Method returns the i'th method in the type's method set. 
func (t *interfaceType) Method(i int) (m Method) {
	// Out-of-range indexes return the zero Method rather than panicking.
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	pname := t.nameOff(p.name)
	m.Name = pname.name()
	if !pname.isExported() {
		// Unexported methods are qualified by a package path; fall back
		// to the interface type's own package when the name carries none.
		m.PkgPath = pname.pkgPath()
		if m.PkgPath == "" {
			m.PkgPath = t.pkgPath.name()
		}
	}
	// Interface methods have no receiver, so Type is the bare signature
	// and Func is left nil.
	m.Type = toType(t.typeOff(p.typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's
// method set and a boolean indicating whether the method was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	// Linear scan over the interface's method table.
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if t.nameOff(p.name).name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	// Each pass of the loop consumes one key:"value" pair from the
	// front of tag; any syntax error terminates the scan.
	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		// i starts at 1 to skip the opening quote; a backslash escapes
		// the following byte.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			// qvalue includes the surrounding quotes; Unquote also
			// interprets the escape sequences.
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
// It panics if i is out of range.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = p.name.name()
	f.Anonymous = p.embedded()
	if !p.name.isExported() {
		f.PkgPath = t.pkgPath.name()
	}
	if tag := p.name.tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			// After the first step, follow an embedded pointer-to-struct
			// implicitly, mirroring selector resolution in the language.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for embedded field) type for field f.
				fname := f.name.name()
				var ntyp *rtype
				if f.embedded() {
					// Embedded field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without embedded fields.
	hasEmbeds := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if tf.name.name() == name {
				return t.Field(i), true
			}
			if tf.embedded() {
				hasEmbeds = true
			}
		}
	}
	if !hasEmbeds {
		return
	}
	// Fall back to the full breadth-first search over embedded fields.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}

// ptrMap is the cache for PtrTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}

// ptrTo returns the *rtype describing a pointer to t, consulting (in
// order) the descriptor's own ptrToThis offset, the ptrMap cache, the
// types linked into the binary, and finally synthesizing a new ptrType.
func (t *rtype) ptrTo() *rtype {
	if t.ptrToThis != 0 {
		return t.typeOff(t.ptrToThis)
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p := (*ptrType)(unsafe.Pointer(tt))
		if p.elem != t {
			continue
		}
		// LoadOrStore keeps the first stored entry under concurrent calls.
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).rtype
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.str = resolveReflectName(newName(s, "", false))
	pp.ptrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.hash = fnv1(t.hash, '*')

	pp.elem = t

	pi, _ := ptrMap.LoadOrStore(t, &pp)
	return &pi.(*ptrType).rtype
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*rtype), t)
}

// AssignableTo reports whether a value of the type is assignable to type u.
// It panics if u is nil.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*rtype)
	return directlyAssignable(uu, t) || implements(uu, t)
}

// ConvertibleTo reports whether a value of the type is convertible to type u.
// It panics if u is nil.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*rtype)
	return convertOp(uu, t) != nil
}

// Comparable reports whether values of this type are comparable;
// a type is comparable exactly when the runtime provides an equality
// algorithm for it.
func (t *rtype) Comparable() bool {
	return t.alg != nil && t.alg.equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	// The empty interface is implemented by everything.
	if len(t.methods) == 0 {
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			tmName := t.nameOff(tm.name)
			vm := &v.methods[j]
			vmName := V.nameOff(vm.name)
			if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
				if !tmName.isExported() {
					// Unexported methods match only within the same package;
					// an empty pkgPath on the name falls back to the type's.
					tmPkgPath := tmName.pkgPath()
					if tmPkgPath == "" {
						tmPkgPath = t.pkgPath.name()
					}
					vmPkgPath := vmName.pkgPath()
					if vmPkgPath == "" {
						vmPkgPath = v.pkgPath.name()
					}
					if tmPkgPath != vmPkgPath {
						continue
					}
				}
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all, but T requires at least one.
		return false
	}
	i := 0
	vmethods := v.methods()
	for j := 0; j < int(v.mcount); j++ {
		tm := &t.methods[i]
		tmName := t.nameOff(tm.name)
		vm := vmethods[j]
		vmName := V.nameOff(vm.name)
		if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
			if !tmName.isExported() {
				tmPkgPath := tmName.pkgPath()
				if tmPkgPath == "" {
					tmPkgPath = t.pkgPath.name()
				}
				vmPkgPath := vmName.pkgPath()
				if vmPkgPath == "" {
					vmPkgPath = V.nameOff(v.pkgPath).name()
				}
				if tmPkgPath != vmPkgPath {
					continue
				}
			}
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must not be defined
	// and they must have the same kind.
	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
		return false
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether T and V are identical types,
// comparing struct field tags only when cmpTags is set.
func haveIdenticalType(T, V Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if T.Name() != V.Name() || T.Kind() != V.Kind() {
		return false
	}

	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, recursing structurally through composite kinds.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// outCount comparison also covers the variadic flag bit.
		if t.outCount != v.outCount || t.inCount != v.inCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		if t.pkgPath.name() != v.pkgPath.name() {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.name.tag() != vf.name.tag() {
				return false
			}
			// offsetEmbed packs both the field offset and the embedded flag.
			if tf.offsetEmbed != vf.offsetEmbed {
				return false
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff resolves a typelinks offset within a module section to
// a type descriptor pointer.
func rtypeOff(section unsafe.Pointer, off int32) *rtype {
	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*rtype {
	sections, offset := typelinks()
	var ret []*rtype

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)/2 // avoid overflow when computing h
			// i ≤ h < j
			if !(rtypeOff(section, offs[h]).String() >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if typ.String() != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + typ.String()
	case RecvDir:
		s = "<-chan " + typ.String()
	case BothDir:
		s = "chan " + typ.String()
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.elem == typ && ch.dir == uintptr(dir) {
			// LoadOrStore keeps whichever entry won a concurrent race.
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a channel type.
	// Start from the linked-in descriptor of chan unsafe.Pointer and
	// overwrite the fields that differ.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = 0
	ch.dir = uintptr(dir)
	ch.str = resolveReflectName(newName(s, "", false))
	ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.elem = typ

	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
	return ti.(Type)
}

func ismapkey(*rtype) bool // implemented in runtime

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if !ismapkey(ktyp) {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + ktyp.String() + "]" + etyp.String()
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.key == ktyp && mt.elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a map type.
	// Start from the linked-in descriptor of map[unsafe.Pointer]unsafe.Pointer
	// and fill in the runtime map-header fields for the actual key/elem.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.str = resolveReflectName(newName(s, "", false))
	mt.tflag = 0
	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.key = ktyp
	mt.elem = etyp
	mt.bucket = bucketOf(ktyp, etyp)
	// Oversized keys/values are stored indirectly (as pointers) in buckets.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.indirectkey = 1
	} else {
		mt.keysize = uint8(ktyp.size)
		mt.indirectkey = 0
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.indirectvalue = 1
	} else {
		mt.valuesize = uint8(etyp.size)
		mt.indirectvalue = 0
	}
	mt.bucketsize = uint16(mt.bucket.size)
	mt.reflexivekey = isReflexive(ktyp)
	mt.needkeyupdate = needKeyUpdate(ktyp)
	mt.ptrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
	return ti.(Type)
}

// funcTypeFixedN types provide a funcType with inline storage for up to
// N parameter/result type pointers, matching the in-memory layout that
// funcType.in/out expect (the args array immediately follows the header).
type funcTypeFixed4 struct {
	funcType
	args [4]*rtype
}
type funcTypeFixed8 struct {
	funcType
	args [8]*rtype
}
type funcTypeFixed16 struct {
	funcType
	args [16]*rtype
}
type funcTypeFixed32 struct {
	funcType
	args [32]*rtype
}
type funcTypeFixed64 struct {
	funcType
	args [64]*rtype
}
type funcTypeFixed128 struct {
	funcType
	args [128]*rtype
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	// Pick the smallest fixed-size backing struct that holds all
	// n parameter/result pointers inline after the funcType header.
	var ft *funcType
	var args []*rtype
	switch {
	case n <= 4:
		fixed := new(funcTypeFixed4)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 8:
		fixed := new(funcTypeFixed8)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 16:
		fixed := new(funcTypeFixed16)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 32:
		fixed := new(funcTypeFixed32)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 64:
		fixed := new(funcTypeFixed64)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 128:
		fixed := new(funcTypeFixed128)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	default:
		panic("reflect.FuncOf: too many arguments")
	}
	*ft = *prototype

	// Build a hash and minimally populate ft.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if len(args) > 50 {
		panic("reflect.FuncOf does not support more than 50 arguments")
	}
	ft.tflag = 0
	ft.hash = hash
	ft.inCount = uint16(len(in))
	ft.outCount = uint16(len(out))
	if variadic {
		// The top bit of outCount marks the function as variadic.
		ft.outCount |= 1 << 15
	}

	// Look in cache.
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// addToCache appends tt to the hash bucket; the surrounding mutex
	// serializes stores while loads stay lock-free.
	addToCache := func(tt *rtype) Type {
		var rts []*rtype
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*rtype)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return tt
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
			return addToCache(tt)
		}
	}

	// Populate the remaining fields of ft and store in cache.
2031 ft.str = resolveReflectName(newName(str, "", false)) 2032 ft.ptrToThis = 0 2033 return addToCache(&ft.rtype) 2034 } 2035 2036 // funcStr builds a string representation of a funcType. 2037 func funcStr(ft *funcType) string { 2038 repr := make([]byte, 0, 64) 2039 repr = append(repr, "func("...) 2040 for i, t := range ft.in() { 2041 if i > 0 { 2042 repr = append(repr, ", "...) 2043 } 2044 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2045 repr = append(repr, "..."...) 2046 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2047 } else { 2048 repr = append(repr, t.String()...) 2049 } 2050 } 2051 repr = append(repr, ')') 2052 out := ft.out() 2053 if len(out) == 1 { 2054 repr = append(repr, ' ') 2055 } else if len(out) > 1 { 2056 repr = append(repr, " ("...) 2057 } 2058 for i, t := range out { 2059 if i > 0 { 2060 repr = append(repr, ", "...) 2061 } 2062 repr = append(repr, t.String()...) 2063 } 2064 if len(out) > 1 { 2065 repr = append(repr, ')') 2066 } 2067 return string(repr) 2068 } 2069 2070 // isReflexive reports whether the == operation on the type is reflexive. 2071 // That is, x == x for all values x of type t. 2072 func isReflexive(t *rtype) bool { 2073 switch t.Kind() { 2074 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2075 return true 2076 case Float32, Float64, Complex64, Complex128, Interface: 2077 return false 2078 case Array: 2079 tt := (*arrayType)(unsafe.Pointer(t)) 2080 return isReflexive(tt.elem) 2081 case Struct: 2082 tt := (*structType)(unsafe.Pointer(t)) 2083 for _, f := range tt.fields { 2084 if !isReflexive(f.typ) { 2085 return false 2086 } 2087 } 2088 return true 2089 default: 2090 // Func, Map, Slice, Invalid 2091 panic("isReflexive called on non-key type " + t.String()) 2092 } 2093 } 2094 2095 // needKeyUpdate reports whether map overwrites require the key to be copied. 
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}

// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8
	maxKeySize uintptr = 128
	maxValSize uintptr = 128
)

// bucketOf constructs the GC-only bucket type for a map[ktyp]etyp,
// mirroring the runtime's bucket layout: tophash array, 8 keys, 8 values,
// optional NaCl padding, then the overflow pointer.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// See comment on hmap.overflow in ../runtime/map.go.
	// The bucket is pointer-free only if both key and elem are pointer-free
	// and stored inline (not indirected below).
	var kind uint8
	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
		kind = kindNoPointers
	}

	// Oversized keys/values are stored as pointers in the bucket.
	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr
	var overflowPad uintptr

	// On NaCl, pad if needed to make overflow end at the proper struct alignment.
	// On other systems, align > ptrSize is not possible.
	if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
		overflowPad = ptrSize
	}
	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
		panic("reflect: bad size computation in MapOf")
	}

	if kind != kindNoPointers {
		// Build the pointer bitmap word by word: replicate each key's and
		// value's pointer mask 8 times (once per bucket slot), then mark
		// the trailing overflow pointer.
		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
		mask := make([]byte, (nptr+7)/8)
		base := bucketSize / ptrSize

		if ktyp.kind&kindNoPointers == 0 {
			if ktyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
			for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
				if (kmask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*ktyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * ktyp.size / ptrSize

		if etyp.kind&kindNoPointers == 0 {
			if etyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
			for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
				if (emask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*etyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * etyp.size / ptrSize
		base += overflowPad / ptrSize

		word := base
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:   ptrSize,
		size:    size,
		kind:    kind,
		ptrdata: ptrdata,
		gcdata:  gcdata,
	}
	if overflowPad > 0 {
		b.align = 8
	}
	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
	b.str = resolveReflectName(newName(s, "", false))
	return b
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + typ.String()
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a slice type from the []unsafe.Pointer prototype.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.tflag = 0
	slice.str = resolveReflectName(newName(s, "", false))
	slice.hash = fnv1(typ.hash, '[')
	slice.elem = typ
	slice.ptrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
	return ti.(Type)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// structTypeUncommon is the variant used when no methods are attached
// (StructOf's len(methods) == 0 case): just the struct type plus its
// uncommonType, with no trailing method array.
type structTypeUncommon struct {
	structType
	u uncommonType
}

// A *rtype representing a struct is followed directly in memory by an
// array of method objects representing the methods attached to the
// struct. To get the same layout for a run time generated type, we
// need an array directly following the uncommonType memory. The types
// structTypeFixed4, ...structTypeFixedN are used to do this.
//
// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.

// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
// have no methods, they could be defined at runtime using the StructOf
// function.

type structTypeFixed4 struct {
	structType
	u uncommonType
	m [4]method
}

type structTypeFixed8 struct {
	structType
	u uncommonType
	m [8]method
}

type structTypeFixed16 struct {
	structType
	u uncommonType
	m [16]method
}

type structTypeFixed32 struct {
	structType
	u uncommonType
	m [32]method
}

// isLetter returns true if a given 'rune' is classified as a Letter.
// ASCII letters and '_' are handled inline; anything >= utf8.RuneSelf
// defers to the Unicode tables.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
2328 func isValidFieldName(fieldName string) bool { 2329 for i, c := range fieldName { 2330 if i == 0 && !isLetter(c) { 2331 return false 2332 } 2333 2334 if !(isLetter(c) || unicode.IsDigit(c)) { 2335 return false 2336 } 2337 } 2338 2339 return len(fieldName) > 0 2340 } 2341 2342 // StructOf returns the struct type containing fields. 2343 // The Offset and Index fields are ignored and computed as they would be 2344 // by the compiler. 2345 // 2346 // StructOf currently does not generate wrapper methods for embedded 2347 // fields and panics if passed unexported StructFields. 2348 // These limitations may be lifted in a future version. 2349 func StructOf(fields []StructField) Type { 2350 var ( 2351 hash = fnv1(0, []byte("struct {")...) 2352 size uintptr 2353 typalign uint8 2354 comparable = true 2355 hashable = true 2356 methods []method 2357 2358 fs = make([]structField, len(fields)) 2359 repr = make([]byte, 0, 64) 2360 fset = map[string]struct{}{} // fields' names 2361 2362 hasPtr = false // records whether at least one struct-field is a pointer 2363 hasGCProg = false // records whether a struct-field type has a GCProg 2364 ) 2365 2366 lastzero := uintptr(0) 2367 repr = append(repr, "struct {"...) 2368 for i, field := range fields { 2369 if field.Name == "" { 2370 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2371 } 2372 if !isValidFieldName(field.Name) { 2373 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2374 } 2375 if field.Type == nil { 2376 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2377 } 2378 f := runtimeStructField(field) 2379 ft := f.typ 2380 if ft.kind&kindGCProg != 0 { 2381 hasGCProg = true 2382 } 2383 if ft.pointers() { 2384 hasPtr = true 2385 } 2386 2387 // Update string and hash 2388 name := f.name.name() 2389 hash = fnv1(hash, []byte(name)...) 2390 repr = append(repr, (" " + name)...) 
2391 if f.embedded() { 2392 // Embedded field 2393 if f.typ.Kind() == Ptr { 2394 // Embedded ** and *interface{} are illegal 2395 elem := ft.Elem() 2396 if k := elem.Kind(); k == Ptr || k == Interface { 2397 panic("reflect.StructOf: illegal embedded field type " + ft.String()) 2398 } 2399 } 2400 2401 switch f.typ.Kind() { 2402 case Interface: 2403 ift := (*interfaceType)(unsafe.Pointer(ft)) 2404 for im, m := range ift.methods { 2405 if ift.nameOff(m.name).pkgPath() != "" { 2406 // TODO(sbinet). Issue 15924. 2407 panic("reflect: embedded interface with unexported method(s) not implemented") 2408 } 2409 2410 var ( 2411 mtyp = ift.typeOff(m.typ) 2412 ifield = i 2413 imethod = im 2414 ifn Value 2415 tfn Value 2416 ) 2417 2418 if ft.kind&kindDirectIface != 0 { 2419 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2420 var args []Value 2421 var recv = in[0] 2422 if len(in) > 1 { 2423 args = in[1:] 2424 } 2425 return recv.Field(ifield).Method(imethod).Call(args) 2426 }) 2427 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2428 var args []Value 2429 var recv = in[0] 2430 if len(in) > 1 { 2431 args = in[1:] 2432 } 2433 return recv.Field(ifield).Method(imethod).Call(args) 2434 }) 2435 } else { 2436 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2437 var args []Value 2438 var recv = in[0] 2439 if len(in) > 1 { 2440 args = in[1:] 2441 } 2442 return recv.Field(ifield).Method(imethod).Call(args) 2443 }) 2444 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2445 var args []Value 2446 var recv = Indirect(in[0]) 2447 if len(in) > 1 { 2448 args = in[1:] 2449 } 2450 return recv.Field(ifield).Method(imethod).Call(args) 2451 }) 2452 } 2453 2454 methods = append(methods, method{ 2455 name: resolveReflectName(ift.nameOff(m.name)), 2456 mtyp: resolveReflectType(mtyp), 2457 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2458 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2459 }) 2460 } 2461 case Ptr: 2462 ptr := (*ptrType)(unsafe.Pointer(ft)) 2463 if unt := ptr.uncommon(); unt != nil { 2464 
if i > 0 && unt.mcount > 0 { 2465 // Issue 15924. 2466 panic("reflect: embedded type with methods not implemented if type is not first field") 2467 } 2468 if len(fields) > 1 { 2469 panic("reflect: embedded type with methods not implemented if there is more than one field") 2470 } 2471 for _, m := range unt.methods() { 2472 mname := ptr.nameOff(m.name) 2473 if mname.pkgPath() != "" { 2474 // TODO(sbinet). 2475 // Issue 15924. 2476 panic("reflect: embedded interface with unexported method(s) not implemented") 2477 } 2478 methods = append(methods, method{ 2479 name: resolveReflectName(mname), 2480 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2481 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2482 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2483 }) 2484 } 2485 } 2486 if unt := ptr.elem.uncommon(); unt != nil { 2487 for _, m := range unt.methods() { 2488 mname := ptr.nameOff(m.name) 2489 if mname.pkgPath() != "" { 2490 // TODO(sbinet) 2491 // Issue 15924. 2492 panic("reflect: embedded interface with unexported method(s) not implemented") 2493 } 2494 methods = append(methods, method{ 2495 name: resolveReflectName(mname), 2496 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2497 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2498 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2499 }) 2500 } 2501 } 2502 default: 2503 if unt := ft.uncommon(); unt != nil { 2504 if i > 0 && unt.mcount > 0 { 2505 // Issue 15924. 2506 panic("reflect: embedded type with methods not implemented if type is not first field") 2507 } 2508 if len(fields) > 1 && ft.kind&kindDirectIface != 0 { 2509 panic("reflect: embedded type with methods not implemented for non-pointer type") 2510 } 2511 for _, m := range unt.methods() { 2512 mname := ft.nameOff(m.name) 2513 if mname.pkgPath() != "" { 2514 // TODO(sbinet) 2515 // Issue 15924. 
2516 panic("reflect: embedded interface with unexported method(s) not implemented") 2517 } 2518 methods = append(methods, method{ 2519 name: resolveReflectName(mname), 2520 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2521 ifn: resolveReflectText(ft.textOff(m.ifn)), 2522 tfn: resolveReflectText(ft.textOff(m.tfn)), 2523 }) 2524 2525 } 2526 } 2527 } 2528 } 2529 if _, dup := fset[name]; dup { 2530 panic("reflect.StructOf: duplicate field " + name) 2531 } 2532 fset[name] = struct{}{} 2533 2534 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2535 2536 repr = append(repr, (" " + ft.String())...) 2537 if f.name.tagLen() > 0 { 2538 hash = fnv1(hash, []byte(f.name.tag())...) 2539 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2540 } 2541 if i < len(fields)-1 { 2542 repr = append(repr, ';') 2543 } 2544 2545 comparable = comparable && (ft.alg.equal != nil) 2546 hashable = hashable && (ft.alg.hash != nil) 2547 2548 offset := align(size, uintptr(ft.align)) 2549 if ft.align > typalign { 2550 typalign = ft.align 2551 } 2552 size = offset + ft.size 2553 f.offsetEmbed |= offset << 1 2554 2555 if ft.size == 0 { 2556 lastzero = size 2557 } 2558 2559 fs[i] = f 2560 } 2561 2562 if size > 0 && lastzero == size { 2563 // This is a non-zero sized struct that ends in a 2564 // zero-sized field. We add an extra byte of padding, 2565 // to ensure that taking the address of the final 2566 // zero-sized field can't manufacture a pointer to the 2567 // next object in the heap. See issue 9401. 
2568 size++ 2569 } 2570 2571 var typ *structType 2572 var ut *uncommonType 2573 2574 switch { 2575 case len(methods) == 0: 2576 t := new(structTypeUncommon) 2577 typ = &t.structType 2578 ut = &t.u 2579 case len(methods) <= 4: 2580 t := new(structTypeFixed4) 2581 typ = &t.structType 2582 ut = &t.u 2583 copy(t.m[:], methods) 2584 case len(methods) <= 8: 2585 t := new(structTypeFixed8) 2586 typ = &t.structType 2587 ut = &t.u 2588 copy(t.m[:], methods) 2589 case len(methods) <= 16: 2590 t := new(structTypeFixed16) 2591 typ = &t.structType 2592 ut = &t.u 2593 copy(t.m[:], methods) 2594 case len(methods) <= 32: 2595 t := new(structTypeFixed32) 2596 typ = &t.structType 2597 ut = &t.u 2598 copy(t.m[:], methods) 2599 default: 2600 panic("reflect.StructOf: too many methods") 2601 } 2602 // TODO(sbinet): Once we allow embedding multiple types, 2603 // methods will need to be sorted like the compiler does. 2604 // TODO(sbinet): Once we allow non-exported methods, we will 2605 // need to compute xcount as the number of exported methods. 2606 ut.mcount = uint16(len(methods)) 2607 ut.xcount = ut.mcount 2608 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2609 2610 if len(fs) > 0 { 2611 repr = append(repr, ' ') 2612 } 2613 repr = append(repr, '}') 2614 hash = fnv1(hash, '}') 2615 str := string(repr) 2616 2617 // Round the size up to be a multiple of the alignment. 2618 size = align(size, uintptr(typalign)) 2619 2620 // Make the struct type. 2621 var istruct interface{} = struct{}{} 2622 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2623 *typ = *prototype 2624 typ.fields = fs 2625 2626 // Look in cache. 2627 if ts, ok := structLookupCache.m.Load(hash); ok { 2628 for _, st := range ts.([]Type) { 2629 t := st.common() 2630 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2631 return t 2632 } 2633 } 2634 } 2635 2636 // Not in cache, lock and retry. 
2637 structLookupCache.Lock() 2638 defer structLookupCache.Unlock() 2639 if ts, ok := structLookupCache.m.Load(hash); ok { 2640 for _, st := range ts.([]Type) { 2641 t := st.common() 2642 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2643 return t 2644 } 2645 } 2646 } 2647 2648 addToCache := func(t Type) Type { 2649 var ts []Type 2650 if ti, ok := structLookupCache.m.Load(hash); ok { 2651 ts = ti.([]Type) 2652 } 2653 structLookupCache.m.Store(hash, append(ts, t)) 2654 return t 2655 } 2656 2657 // Look in known types. 2658 for _, t := range typesByString(str) { 2659 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2660 // even if 't' wasn't a structType with methods, we should be ok 2661 // as the 'u uncommonType' field won't be accessed except when 2662 // tflag&tflagUncommon is set. 2663 return addToCache(t) 2664 } 2665 } 2666 2667 typ.str = resolveReflectName(newName(str, "", false)) 2668 typ.tflag = 0 2669 typ.hash = hash 2670 typ.size = size 2671 typ.align = typalign 2672 typ.fieldAlign = typalign 2673 typ.ptrToThis = 0 2674 if len(methods) > 0 { 2675 typ.tflag |= tflagUncommon 2676 } 2677 if !hasPtr { 2678 typ.kind |= kindNoPointers 2679 } else { 2680 typ.kind &^= kindNoPointers 2681 } 2682 2683 if hasGCProg { 2684 lastPtrField := 0 2685 for i, ft := range fs { 2686 if ft.typ.pointers() { 2687 lastPtrField = i 2688 } 2689 } 2690 prog := []byte{0, 0, 0, 0} // will be length of prog 2691 for i, ft := range fs { 2692 if i > lastPtrField { 2693 // gcprog should not include anything for any field after 2694 // the last field that contains pointer data 2695 break 2696 } 2697 // FIXME(sbinet) handle padding, fields smaller than a word 2698 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:] 2699 elemPtrs := ft.typ.ptrdata / ptrSize 2700 switch { 2701 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0: 2702 // Element is small with pointer mask; use as literal bits. 
2703 mask := elemGC 2704 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2705 var n uintptr 2706 for n := elemPtrs; n > 120; n -= 120 { 2707 prog = append(prog, 120) 2708 prog = append(prog, mask[:15]...) 2709 mask = mask[15:] 2710 } 2711 prog = append(prog, byte(n)) 2712 prog = append(prog, mask[:(n+7)/8]...) 2713 case ft.typ.kind&kindGCProg != 0: 2714 // Element has GC program; emit one element. 2715 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2716 prog = append(prog, elemProg...) 2717 } 2718 // Pad from ptrdata to size. 2719 elemWords := ft.typ.size / ptrSize 2720 if elemPtrs < elemWords { 2721 // Emit literal 0 bit, then repeat as needed. 2722 prog = append(prog, 0x01, 0x00) 2723 if elemPtrs+1 < elemWords { 2724 prog = append(prog, 0x81) 2725 prog = appendVarint(prog, elemWords-elemPtrs-1) 2726 } 2727 } 2728 } 2729 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2730 typ.kind |= kindGCProg 2731 typ.gcdata = &prog[0] 2732 } else { 2733 typ.kind &^= kindGCProg 2734 bv := new(bitVector) 2735 addTypeBits(bv, 0, typ.common()) 2736 if len(bv.data) > 0 { 2737 typ.gcdata = &bv.data[0] 2738 } 2739 } 2740 typ.ptrdata = typeptrdata(typ.common()) 2741 typ.alg = new(typeAlg) 2742 if hashable { 2743 typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr { 2744 o := seed 2745 for _, ft := range typ.fields { 2746 pi := add(p, ft.offset(), "&x.field safe") 2747 o = ft.typ.alg.hash(pi, o) 2748 } 2749 return o 2750 } 2751 } 2752 2753 if comparable { 2754 typ.alg.equal = func(p, q unsafe.Pointer) bool { 2755 for _, ft := range typ.fields { 2756 pi := add(p, ft.offset(), "&x.field safe") 2757 qi := add(q, ft.offset(), "&x.field safe") 2758 if !ft.typ.alg.equal(pi, qi) { 2759 return false 2760 } 2761 } 2762 return true 2763 } 2764 } 2765 2766 switch { 2767 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2768 // structs of 1 direct iface type can be direct 2769 typ.kind |= kindDirectIface 2770 default: 2771 
typ.kind &^= kindDirectIface 2772 } 2773 2774 return addToCache(&typ.rtype) 2775 } 2776 2777 func runtimeStructField(field StructField) structField { 2778 if field.PkgPath != "" { 2779 panic("reflect.StructOf: StructOf does not allow unexported fields") 2780 } 2781 2782 // Best-effort check for misuse. 2783 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through. 2784 c := field.Name[0] 2785 if 'a' <= c && c <= 'z' || c == '_' { 2786 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2787 } 2788 2789 offsetEmbed := uintptr(0) 2790 if field.Anonymous { 2791 offsetEmbed |= 1 2792 } 2793 2794 resolveReflectType(field.Type.common()) // install in runtime 2795 return structField{ 2796 name: newName(field.Name, string(field.Tag), true), 2797 typ: field.Type.common(), 2798 offsetEmbed: offsetEmbed, 2799 } 2800 } 2801 2802 // typeptrdata returns the length in bytes of the prefix of t 2803 // containing pointer data. Anything after this offset is scalar data. 2804 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2805 func typeptrdata(t *rtype) uintptr { 2806 if !t.pointers() { 2807 return 0 2808 } 2809 switch t.Kind() { 2810 case Struct: 2811 st := (*structType)(unsafe.Pointer(t)) 2812 // find the last field that has pointers. 2813 field := 0 2814 for i := range st.fields { 2815 ft := st.fields[i].typ 2816 if ft.pointers() { 2817 field = i 2818 } 2819 } 2820 f := st.fields[field] 2821 return f.offset() + f.typ.ptrdata 2822 2823 default: 2824 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2825 } 2826 } 2827 2828 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2829 const maxPtrmaskBytes = 2048 2830 2831 // ArrayOf returns the array type with the given count and element type. 2832 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2833 // 2834 // If the resulting type would be larger than the available address space, 2835 // ArrayOf panics. 
func ArrayOf(count int, elem Type) Type {
	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(count)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(count) + "]" + typ.String()
	for _, tt := range typesByString(s) {
		array := (*arrayType)(unsafe.Pointer(tt))
		if array.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make an array type from the [1]unsafe.Pointer prototype.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.tflag = 0
	array.str = resolveReflectName(newName(s, "", false))
	array.hash = fnv1(typ.hash, '[')
	for n := uint32(count); n > 0; n >>= 8 {
		array.hash = fnv1(array.hash, byte(n))
	}
	array.hash = fnv1(array.hash, ']')
	array.elem = typ
	array.ptrToThis = 0
	// Reject arrays whose total size would overflow uintptr.
	if typ.size > 0 {
		max := ^uintptr(0) / typ.size
		if uintptr(count) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(count)
	// Pointer data ends inside the last element: full strides for the
	// first count-1 elements plus the element's own ptrdata.
	if count > 0 && typ.ptrdata != 0 {
		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.len = uintptr(count)
	array.slice = SliceOf(elem).(*rtype)

	array.kind &^= kindNoPointers
	switch {
	case typ.kind&kindNoPointers != 0 || array.size == 0:
		// No pointers.
		array.kind |= kindNoPointers
		array.gcdata = nil
		array.ptrdata = 0

	case count == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into count 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemWords := typ.size / ptrSize
		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
			if (elemMask[j/8]>>(j%8))&1 != 0 {
				for i := uintptr(0); i < array.len; i++ {
					k := i*elemWords + j
					mask[k/8] |= 1 << (k % 8)
				}
			}
		}
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemPtrs := typ.ptrdata / ptrSize
		if typ.kind&kindGCProg == 0 {
			// Element is small with pointer mask; use as literal bits.
			mask := elemGC
			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
			var n uintptr
			for n = elemPtrs; n > 120; n -= 120 {
				prog = append(prog, 120)
				prog = append(prog, mask[:15]...)
				mask = mask[15:]
			}
			prog = append(prog, byte(n))
			prog = append(prog, mask[:(n+7)/8]...)
		} else {
			// Element has GC program; emit one element.
			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
			prog = append(prog, elemProg...)
		}
		// Pad from ptrdata to size.
		elemWords := typ.size / ptrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat count-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(count)-1)
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	etyp := typ.common()
	esize := etyp.Size()
	ealg := etyp.alg

	// Build element-wise equal/hash only when the element supports them.
	array.alg = new(typeAlg)
	if ealg.equal != nil {
		eequal := ealg.equal
		array.alg.equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < count; i++ {
				pi := arrayAt(p, i, esize, "i < count")
				qi := arrayAt(q, i, esize, "i < count")
				if !eequal(pi, qi) {
					return false
				}

			}
			return true
		}
	}
	if ealg.hash != nil {
		ehash := ealg.hash
		array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
			o := seed
			for i := 0; i < count; i++ {
				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
			}
			return o
		}
	}

	switch {
	case count == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.kind |= kindDirectIface
	default:
		array.kind &^= kindDirectIface
	}

	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
	return ti.(Type)
}

// appendVarint appends v to x in the GC program's varint encoding
// (7 bits per byte, high bit set on all but the last byte).
func appendVarint(x []byte, v uintptr) []byte {
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	x = append(x, byte(v))
	return x
}

// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect.
In gc, the only concern is that 3014 // a nil *rtype must be replaced by a nil Type, but in gccgo this 3015 // function takes care of ensuring that multiple *rtype for the same 3016 // type are coalesced into a single Type. 3017 func toType(t *rtype) Type { 3018 if t == nil { 3019 return nil 3020 } 3021 return t 3022 } 3023 3024 type layoutKey struct { 3025 ftyp *funcType // function signature 3026 rcvr *rtype // receiver type, or nil if none 3027 } 3028 3029 type layoutType struct { 3030 t *rtype 3031 argSize uintptr // size of arguments 3032 retOffset uintptr // offset of return values. 3033 stack *bitVector 3034 framePool *sync.Pool 3035 } 3036 3037 var layoutCache sync.Map // map[layoutKey]layoutType 3038 3039 // funcLayout computes a struct type representing the layout of the 3040 // function arguments and return values for the function type t. 3041 // If rcvr != nil, rcvr specifies the type of the receiver. 3042 // The returned type exists only for GC, so we only fill out GC relevant info. 3043 // Currently, that's just size and the GC program. We also fill in 3044 // the name for possible debugging use. 3045 func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { 3046 if t.Kind() != Func { 3047 panic("reflect: funcLayout of non-func type") 3048 } 3049 if rcvr != nil && rcvr.Kind() == Interface { 3050 panic("reflect: funcLayout with interface receiver " + rcvr.String()) 3051 } 3052 k := layoutKey{t, rcvr} 3053 if lti, ok := layoutCache.Load(k); ok { 3054 lt := lti.(layoutType) 3055 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool 3056 } 3057 3058 // compute gc program & stack bitmap for arguments 3059 ptrmap := new(bitVector) 3060 var offset uintptr 3061 if rcvr != nil { 3062 // Reflect uses the "interface" calling convention for 3063 // methods, where receivers take one word of argument 3064 // space no matter how big they actually are. 
		// The single receiver word is a pointer when the receiver is
		// stored indirectly, or when a direct receiver itself contains
		// pointers.
		if ifaceIndir(rcvr) || rcvr.pointers() {
			ptrmap.append(1)
		} else {
			ptrmap.append(0)
		}
		offset += ptrSize
	}
	for _, arg := range t.in() {
		// Round offset up to the argument's alignment boundary.
		offset += -offset & uintptr(arg.align-1)
		addTypeBits(ptrmap, offset, arg)
		offset += arg.size
	}
	argSize = offset
	if runtime.GOARCH == "amd64p32" {
		// amd64p32 aligns the results area to 8 bytes.
		offset += -offset & (8 - 1)
	}
	offset += -offset & (ptrSize - 1)
	retOffset = offset
	for _, res := range t.out() {
		offset += -offset & uintptr(res.align-1)
		addTypeBits(ptrmap, offset, res)
		offset += res.size
	}
	// Round the total frame size up to a pointer-word boundary.
	offset += -offset & (ptrSize - 1)

	// build dummy rtype holding gc program
	x := &rtype{
		align:   ptrSize,
		size:    offset,
		ptrdata: uintptr(ptrmap.n) * ptrSize,
	}
	if runtime.GOARCH == "amd64p32" {
		x.align = 8
	}
	if ptrmap.n > 0 {
		x.gcdata = &ptrmap.data[0]
	} else {
		x.kind |= kindNoPointers
	}

	// Synthesize a name for the frame type for debugging.
	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() interface{} {
		return unsafe_New(x)
	}}
	// LoadOrStore: if another goroutine raced us here, use its result
	// so all callers share one layout and frame pool.
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		argSize:   argSize,
		retOffset: retOffset,
		stack:     ptrmap,
		framePool: framePool,
	})
	lt := lti.(layoutType)
	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
3140 func (bv *bitVector) append(bit uint8) { 3141 if bv.n%8 == 0 { 3142 bv.data = append(bv.data, 0) 3143 } 3144 bv.data[bv.n/8] |= bit << (bv.n % 8) 3145 bv.n++ 3146 } 3147 3148 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3149 if t.kind&kindNoPointers != 0 { 3150 return 3151 } 3152 3153 switch Kind(t.kind & kindMask) { 3154 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: 3155 // 1 pointer at start of representation 3156 for bv.n < uint32(offset/uintptr(ptrSize)) { 3157 bv.append(0) 3158 } 3159 bv.append(1) 3160 3161 case Interface: 3162 // 2 pointers 3163 for bv.n < uint32(offset/uintptr(ptrSize)) { 3164 bv.append(0) 3165 } 3166 bv.append(1) 3167 bv.append(1) 3168 3169 case Array: 3170 // repeat inner type 3171 tt := (*arrayType)(unsafe.Pointer(t)) 3172 for i := 0; i < int(tt.len); i++ { 3173 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3174 } 3175 3176 case Struct: 3177 // apply fields 3178 tt := (*structType)(unsafe.Pointer(t)) 3179 for i := range tt.fields { 3180 f := &tt.fields[i] 3181 addTypeBits(bv, offset+f.offset(), f.typ) 3182 } 3183 } 3184 }