github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/reflect/type.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package reflect implements run-time reflection, allowing a program to 6 // manipulate objects with arbitrary types. The typical use is to take a value 7 // with static type interface{} and extract its dynamic type information by 8 // calling TypeOf, which returns a Type. 9 // 10 // A call to ValueOf returns a Value representing the run-time data. 11 // Zero takes a Type and returns a Value representing a zero value 12 // for that type. 13 // 14 // See "The Laws of Reflection" for an introduction to reflection in Go: 15 // https://golang.org/doc/articles/laws_of_reflection.html 16 package reflect 17 18 import ( 19 "runtime" 20 "strconv" 21 "sync" 22 "unsafe" 23 ) 24 25 // Type is the representation of a Go type. 26 // 27 // Not all methods apply to all kinds of types. Restrictions, 28 // if any, are noted in the documentation for each method. 29 // Use the Kind method to find out the kind of type before 30 // calling kind-specific methods. Calling a method 31 // inappropriate to the kind of type causes a run-time panic. 32 // 33 // Type values are comparable, such as with the == operator. 34 // Two Type values are equal if they represent identical types. 35 type Type interface { 36 // Methods applicable to all types. 37 38 // Align returns the alignment in bytes of a value of 39 // this type when allocated in memory. 40 Align() int 41 42 // FieldAlign returns the alignment in bytes of a value of 43 // this type when used as a field in a struct. 44 FieldAlign() int 45 46 // Method returns the i'th method in the type's method set. 47 // It panics if i is not in the range [0, NumMethod()). 
48 // 49 // For a non-interface type T or *T, the returned Method's Type and Func 50 // fields describe a function whose first argument is the receiver. 51 // 52 // For an interface type, the returned Method's Type field gives the 53 // method signature, without a receiver, and the Func field is nil. 54 Method(int) Method 55 56 // MethodByName returns the method with that name in the type's 57 // method set and a boolean indicating if the method was found. 58 // 59 // For a non-interface type T or *T, the returned Method's Type and Func 60 // fields describe a function whose first argument is the receiver. 61 // 62 // For an interface type, the returned Method's Type field gives the 63 // method signature, without a receiver, and the Func field is nil. 64 MethodByName(string) (Method, bool) 65 66 // NumMethod returns the number of exported methods in the type's method set. 67 NumMethod() int 68 69 // Name returns the type's name within its package. 70 // It returns an empty string for unnamed types. 71 Name() string 72 73 // PkgPath returns a named type's package path, that is, the import path 74 // that uniquely identifies the package, such as "encoding/base64". 75 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int), 76 // the package path will be the empty string. 77 PkgPath() string 78 79 // Size returns the number of bytes needed to store 80 // a value of the given type; it is analogous to unsafe.Sizeof. 81 Size() uintptr 82 83 // String returns a string representation of the type. 84 // The string representation may use shortened package names 85 // (e.g., base64 instead of "encoding/base64") and is not 86 // guaranteed to be unique among types. To test for type identity, 87 // compare the Types directly. 88 String() string 89 90 // Kind returns the specific kind of this type. 91 Kind() Kind 92 93 // Implements reports whether the type implements the interface type u. 
94 Implements(u Type) bool 95 96 // AssignableTo reports whether a value of the type is assignable to type u. 97 AssignableTo(u Type) bool 98 99 // ConvertibleTo reports whether a value of the type is convertible to type u. 100 ConvertibleTo(u Type) bool 101 102 // Comparable reports whether values of this type are comparable. 103 Comparable() bool 104 105 // Methods applicable only to some types, depending on Kind. 106 // The methods allowed for each kind are: 107 // 108 // Int*, Uint*, Float*, Complex*: Bits 109 // Array: Elem, Len 110 // Chan: ChanDir, Elem 111 // Func: In, NumIn, Out, NumOut, IsVariadic. 112 // Map: Key, Elem 113 // Ptr: Elem 114 // Slice: Elem 115 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 116 117 // Bits returns the size of the type in bits. 118 // It panics if the type's Kind is not one of the 119 // sized or unsized Int, Uint, Float, or Complex kinds. 120 Bits() int 121 122 // ChanDir returns a channel type's direction. 123 // It panics if the type's Kind is not Chan. 124 ChanDir() ChanDir 125 126 // IsVariadic reports whether a function type's final input parameter 127 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 128 // implicit actual type []T. 129 // 130 // For concreteness, if t represents func(x int, y ... float64), then 131 // 132 // t.NumIn() == 2 133 // t.In(0) is the reflect.Type for "int" 134 // t.In(1) is the reflect.Type for "[]float64" 135 // t.IsVariadic() == true 136 // 137 // IsVariadic panics if the type's Kind is not Func. 138 IsVariadic() bool 139 140 // Elem returns a type's element type. 141 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice. 142 Elem() Type 143 144 // Field returns a struct type's i'th field. 145 // It panics if the type's Kind is not Struct. 146 // It panics if i is not in the range [0, NumField()). 147 Field(i int) StructField 148 149 // FieldByIndex returns the nested field corresponding 150 // to the index sequence. 
It is equivalent to calling Field 151 // successively for each index i. 152 // It panics if the type's Kind is not Struct. 153 FieldByIndex(index []int) StructField 154 155 // FieldByName returns the struct field with the given name 156 // and a boolean indicating if the field was found. 157 FieldByName(name string) (StructField, bool) 158 159 // FieldByNameFunc returns the struct field with a name 160 // that satisfies the match function and a boolean indicating if 161 // the field was found. 162 // 163 // FieldByNameFunc considers the fields in the struct itself 164 // and then the fields in any anonymous structs, in breadth first order, 165 // stopping at the shallowest nesting depth containing one or more 166 // fields satisfying the match function. If multiple fields at that depth 167 // satisfy the match function, they cancel each other 168 // and FieldByNameFunc returns no match. 169 // This behavior mirrors Go's handling of name lookup in 170 // structs containing anonymous fields. 171 FieldByNameFunc(match func(string) bool) (StructField, bool) 172 173 // In returns the type of a function type's i'th input parameter. 174 // It panics if the type's Kind is not Func. 175 // It panics if i is not in the range [0, NumIn()). 176 In(i int) Type 177 178 // Key returns a map type's key type. 179 // It panics if the type's Kind is not Map. 180 Key() Type 181 182 // Len returns an array type's length. 183 // It panics if the type's Kind is not Array. 184 Len() int 185 186 // NumField returns a struct type's field count. 187 // It panics if the type's Kind is not Struct. 188 NumField() int 189 190 // NumIn returns a function type's input parameter count. 191 // It panics if the type's Kind is not Func. 192 NumIn() int 193 194 // NumOut returns a function type's output parameter count. 195 // It panics if the type's Kind is not Func. 196 NumOut() int 197 198 // Out returns the type of a function type's i'th output parameter. 
199 // It panics if the type's Kind is not Func. 200 // It panics if i is not in the range [0, NumOut()). 201 Out(i int) Type 202 203 common() *rtype 204 uncommon() *uncommonType 205 } 206 207 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 208 // if the names are equal, even if they are unexported names originating 209 // in different packages. The practical effect of this is that the result of 210 // t.FieldByName("x") is not well defined if the struct type t contains 211 // multiple fields named x (embedded from different packages). 212 // FieldByName may return one of the fields named x or may report that there are none. 213 // See golang.org/issue/4876 for more details. 214 215 /* 216 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). 217 * A few are known to ../runtime/type.go to convey to debuggers. 218 * They are also known to ../runtime/type.go. 219 */ 220 221 // A Kind represents the specific kind of type that a Type represents. 222 // The zero Kind is not a valid kind. 223 type Kind uint 224 225 const ( 226 Invalid Kind = iota 227 Bool 228 Int 229 Int8 230 Int16 231 Int32 232 Int64 233 Uint 234 Uint8 235 Uint16 236 Uint32 237 Uint64 238 Uintptr 239 Float32 240 Float64 241 Complex64 242 Complex128 243 Array 244 Chan 245 Func 246 Interface 247 Map 248 Ptr 249 Slice 250 String 251 Struct 252 UnsafePointer 253 ) 254 255 // tflag is used by an rtype to signal what extra type information is 256 // available in the memory directly following the rtype value. 257 // 258 // tflag values must be kept in sync with copies in: 259 // cmd/compile/internal/gc/reflect.go 260 // cmd/link/internal/ld/decodesym.go 261 // runtime/type.go 262 type tflag uint8 263 264 const ( 265 // tflagUncommon means that there is a pointer, *uncommonType, 266 // just beyond the outer type structure. 
267 // 268 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 269 // then t has uncommonType data and it can be accessed as: 270 // 271 // type tUncommon struct { 272 // structType 273 // u uncommonType 274 // } 275 // u := &(*tUncommon)(unsafe.Pointer(t)).u 276 tflagUncommon tflag = 1 << 0 277 278 // tflagExtraStar means the name in the str field has an 279 // extraneous '*' prefix. This is because for most types T in 280 // a program, the type *T also exists and reusing the str data 281 // saves binary size. 282 tflagExtraStar tflag = 1 << 1 283 284 // tflagNamed means the type has a name. 285 tflagNamed tflag = 1 << 2 286 ) 287 288 // rtype is the common implementation of most values. 289 // It is embedded in other, public struct types, but always 290 // with a unique tag like `reflect:"array"` or `reflect:"ptr"` 291 // so that code cannot convert from, say, *arrayType to *ptrType. 292 type rtype struct { 293 size uintptr 294 ptrdata uintptr 295 hash uint32 // hash of type; avoids computation in hash tables 296 tflag tflag // extra type information flags 297 align uint8 // alignment of variable with this type 298 fieldAlign uint8 // alignment of struct field with this type 299 kind uint8 // enumeration for C 300 alg *typeAlg // algorithm table 301 gcdata *byte // garbage collection data 302 str nameOff // string form 303 ptrToThis typeOff // type for pointer to this type, may be zero 304 } 305 306 // a copy of runtime.typeAlg 307 type typeAlg struct { 308 // function for hashing objects of this type 309 // (ptr to object, seed) -> hash 310 hash func(unsafe.Pointer, uintptr) uintptr 311 // function for comparing objects of this type 312 // (ptr to object A, ptr to object B) -> ==? 
313 equal func(unsafe.Pointer, unsafe.Pointer) bool 314 } 315 316 // Method on non-interface type 317 type method struct { 318 name nameOff // name of method 319 mtyp typeOff // method type (without receiver) 320 ifn textOff // fn used in interface call (one-word receiver) 321 tfn textOff // fn used for normal method call 322 } 323 324 // uncommonType is present only for types with names or methods 325 // (if T is a named type, the uncommonTypes for T and *T have methods). 326 // Using a pointer to this struct reduces the overall size required 327 // to describe an unnamed type with no methods. 328 type uncommonType struct { 329 pkgPath nameOff // import path; empty for built-in types like int, string 330 mcount uint16 // number of methods 331 _ uint16 // unused 332 moff uint32 // offset from this uncommontype to [mcount]method 333 _ uint32 // unused 334 } 335 336 // ChanDir represents a channel type's direction. 337 type ChanDir int 338 339 const ( 340 RecvDir ChanDir = 1 << iota // <-chan 341 SendDir // chan<- 342 BothDir = RecvDir | SendDir // chan 343 ) 344 345 // arrayType represents a fixed array type. 346 type arrayType struct { 347 rtype `reflect:"array"` 348 elem *rtype // array element type 349 slice *rtype // slice type 350 len uintptr 351 } 352 353 // chanType represents a channel type. 354 type chanType struct { 355 rtype `reflect:"chan"` 356 elem *rtype // channel element type 357 dir uintptr // channel direction (ChanDir) 358 } 359 360 // funcType represents a function type. 361 // 362 // A *rtype for each in and out parameter is stored in an array that 363 // directly follows the funcType (and possibly its uncommonType). So 364 // a function type with one method, one input, and one output is: 365 // 366 // struct { 367 // funcType 368 // uncommonType 369 // [2]*rtype // [0] is in, [1] is out 370 // } 371 type funcType struct { 372 rtype `reflect:"func"` 373 inCount uint16 374 outCount uint16 // top bit is set if last input parameter is ... 
375 } 376 377 // imethod represents a method on an interface type 378 type imethod struct { 379 name nameOff // name of method 380 typ typeOff // .(*FuncType) underneath 381 } 382 383 // interfaceType represents an interface type. 384 type interfaceType struct { 385 rtype `reflect:"interface"` 386 pkgPath name // import path 387 methods []imethod // sorted by hash 388 } 389 390 // mapType represents a map type. 391 type mapType struct { 392 rtype `reflect:"map"` 393 key *rtype // map key type 394 elem *rtype // map element (value) type 395 bucket *rtype // internal bucket structure 396 hmap *rtype // internal map header 397 keysize uint8 // size of key slot 398 indirectkey uint8 // store ptr to key instead of key itself 399 valuesize uint8 // size of value slot 400 indirectvalue uint8 // store ptr to value instead of value itself 401 bucketsize uint16 // size of bucket 402 reflexivekey bool // true if k==k for all keys 403 needkeyupdate bool // true if we need to update key on an overwrite 404 } 405 406 // ptrType represents a pointer type. 407 type ptrType struct { 408 rtype `reflect:"ptr"` 409 elem *rtype // pointer element (pointed at) type 410 } 411 412 // sliceType represents a slice type. 413 type sliceType struct { 414 rtype `reflect:"slice"` 415 elem *rtype // slice element type 416 } 417 418 // Struct field 419 type structField struct { 420 name name // name is always non-empty 421 typ *rtype // type of field 422 offsetAnon uintptr // byte offset of field<<1 | isAnonymous 423 } 424 425 func (f *structField) offset() uintptr { 426 return f.offsetAnon >> 1 427 } 428 429 func (f *structField) anon() bool { 430 return f.offsetAnon&1 != 0 431 } 432 433 // structType represents a struct type. 434 type structType struct { 435 rtype `reflect:"struct"` 436 pkgPath name 437 fields []structField // sorted by offset 438 } 439 440 // name is an encoded type name with optional extra data. 
441 // 442 // The first byte is a bit field containing: 443 // 444 // 1<<0 the name is exported 445 // 1<<1 tag data follows the name 446 // 1<<2 pkgPath nameOff follows the name and tag 447 // 448 // The next two bytes are the data length: 449 // 450 // l := uint16(data[1])<<8 | uint16(data[2]) 451 // 452 // Bytes [3:3+l] are the string data. 453 // 454 // If tag data follows then bytes 3+l and 3+l+1 are the tag length, 455 // with the data following. 456 // 457 // If the import path follows, then 4 bytes at the end of 458 // the data form a nameOff. The import path is only set for concrete 459 // methods that are defined in a different package than their type. 460 // 461 // If a name starts with "*", then the exported bit represents 462 // whether the pointed to type is exported. 463 type name struct { 464 bytes *byte 465 } 466 467 func (n name) data(off int) *byte { 468 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off))) 469 } 470 471 func (n name) isExported() bool { 472 return (*n.bytes)&(1<<0) != 0 473 } 474 475 func (n name) nameLen() int { 476 return int(uint16(*n.data(1))<<8 | uint16(*n.data(2))) 477 } 478 479 func (n name) tagLen() int { 480 if *n.data(0)&(1<<1) == 0 { 481 return 0 482 } 483 off := 3 + n.nameLen() 484 return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1))) 485 } 486 487 func (n name) name() (s string) { 488 if n.bytes == nil { 489 return 490 } 491 b := (*[4]byte)(unsafe.Pointer(n.bytes)) 492 493 hdr := (*stringHeader)(unsafe.Pointer(&s)) 494 hdr.Data = unsafe.Pointer(&b[3]) 495 hdr.Len = int(b[1])<<8 | int(b[2]) 496 return s 497 } 498 499 func (n name) tag() (s string) { 500 tl := n.tagLen() 501 if tl == 0 { 502 return "" 503 } 504 nl := n.nameLen() 505 hdr := (*stringHeader)(unsafe.Pointer(&s)) 506 hdr.Data = unsafe.Pointer(n.data(3 + nl + 2)) 507 hdr.Len = tl 508 return s 509 } 510 511 func (n name) pkgPath() string { 512 if n.bytes == nil || *n.data(0)&(1<<2) == 0 { 513 return "" 514 } 515 off := 3 + n.nameLen() 516 if tl 
:= n.tagLen(); tl > 0 { 517 off += 2 + tl 518 } 519 var nameOff int32 520 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) 521 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 522 return pkgPathName.name() 523 } 524 525 // round n up to a multiple of a. a must be a power of 2. 526 func round(n, a uintptr) uintptr { 527 return (n + a - 1) &^ (a - 1) 528 } 529 530 func newName(n, tag, pkgPath string, exported bool) name { 531 if len(n) > 1<<16-1 { 532 panic("reflect.nameFrom: name too long: " + n) 533 } 534 if len(tag) > 1<<16-1 { 535 panic("reflect.nameFrom: tag too long: " + tag) 536 } 537 538 var bits byte 539 l := 1 + 2 + len(n) 540 if exported { 541 bits |= 1 << 0 542 } 543 if len(tag) > 0 { 544 l += 2 + len(tag) 545 bits |= 1 << 1 546 } 547 if pkgPath != "" { 548 bits |= 1 << 2 549 } 550 551 b := make([]byte, l) 552 b[0] = bits 553 b[1] = uint8(len(n) >> 8) 554 b[2] = uint8(len(n)) 555 copy(b[3:], n) 556 if len(tag) > 0 { 557 tb := b[3+len(n):] 558 tb[0] = uint8(len(tag) >> 8) 559 tb[1] = uint8(len(tag)) 560 copy(tb[2:], tag) 561 } 562 563 if pkgPath != "" { 564 panic("reflect: creating a name with a package path is not supported") 565 } 566 567 return name{bytes: &b[0]} 568 } 569 570 /* 571 * The compiler knows the exact layout of all the data structures above. 572 * The compiler does not know about the data structures and methods below. 573 */ 574 575 // Method represents a single method. 576 type Method struct { 577 // Name is the method name. 578 // PkgPath is the package path that qualifies a lower case (unexported) 579 // method name. It is empty for upper case (exported) method names. 580 // The combination of PkgPath and Name uniquely identifies a method 581 // in a method set. 
582 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 583 Name string 584 PkgPath string 585 586 Type Type // method type 587 Func Value // func with receiver as first argument 588 Index int // index for Type.Method 589 } 590 591 const ( 592 kindDirectIface = 1 << 5 593 kindGCProg = 1 << 6 // Type.gc points to GC program 594 kindNoPointers = 1 << 7 595 kindMask = (1 << 5) - 1 596 ) 597 598 func (k Kind) String() string { 599 if int(k) < len(kindNames) { 600 return kindNames[k] 601 } 602 return "kind" + strconv.Itoa(int(k)) 603 } 604 605 var kindNames = []string{ 606 Invalid: "invalid", 607 Bool: "bool", 608 Int: "int", 609 Int8: "int8", 610 Int16: "int16", 611 Int32: "int32", 612 Int64: "int64", 613 Uint: "uint", 614 Uint8: "uint8", 615 Uint16: "uint16", 616 Uint32: "uint32", 617 Uint64: "uint64", 618 Uintptr: "uintptr", 619 Float32: "float32", 620 Float64: "float64", 621 Complex64: "complex64", 622 Complex128: "complex128", 623 Array: "array", 624 Chan: "chan", 625 Func: "func", 626 Interface: "interface", 627 Map: "map", 628 Ptr: "ptr", 629 Slice: "slice", 630 String: "string", 631 Struct: "struct", 632 UnsafePointer: "unsafe.Pointer", 633 } 634 635 func (t *uncommonType) methods() []method { 636 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount] 637 } 638 639 // resolveNameOff resolves a name offset from a base pointer. 640 // The (*rtype).nameOff method is a convenience wrapper for this function. 641 // Implemented in the runtime package. 642 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 643 644 // resolveTypeOff resolves an *rtype offset from a base type. 645 // The (*rtype).typeOff method is a convenience wrapper for this function. 646 // Implemented in the runtime package. 647 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 648 649 // resolveTextOff resolves an function pointer offset from a base type. 
650 // The (*rtype).textOff method is a convenience wrapper for this function. 651 // Implemented in the runtime package. 652 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 653 654 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 655 // It returns a new ID that can be used as a typeOff or textOff, and will 656 // be resolved correctly. Implemented in the runtime package. 657 func addReflectOff(ptr unsafe.Pointer) int32 658 659 // resolveReflectType adds a name to the reflection lookup map in the runtime. 660 // It returns a new nameOff that can be used to refer to the pointer. 661 func resolveReflectName(n name) nameOff { 662 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 663 } 664 665 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 666 // It returns a new typeOff that can be used to refer to the pointer. 667 func resolveReflectType(t *rtype) typeOff { 668 return typeOff(addReflectOff(unsafe.Pointer(t))) 669 } 670 671 // resolveReflectText adds a function pointer to the reflection lookup map in 672 // the runtime. It returns a new textOff that can be used to refer to the 673 // pointer. 
674 func resolveReflectText(ptr unsafe.Pointer) textOff { 675 return textOff(addReflectOff(ptr)) 676 } 677 678 type nameOff int32 // offset to a name 679 type typeOff int32 // offset to an *rtype 680 type textOff int32 // offset from top of text section 681 682 func (t *rtype) nameOff(off nameOff) name { 683 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 684 } 685 686 func (t *rtype) typeOff(off typeOff) *rtype { 687 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 688 } 689 690 func (t *rtype) textOff(off textOff) unsafe.Pointer { 691 return resolveTextOff(unsafe.Pointer(t), int32(off)) 692 } 693 694 func (t *rtype) uncommon() *uncommonType { 695 if t.tflag&tflagUncommon == 0 { 696 return nil 697 } 698 switch t.Kind() { 699 case Struct: 700 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 701 case Ptr: 702 type u struct { 703 ptrType 704 u uncommonType 705 } 706 return &(*u)(unsafe.Pointer(t)).u 707 case Func: 708 type u struct { 709 funcType 710 u uncommonType 711 } 712 return &(*u)(unsafe.Pointer(t)).u 713 case Slice: 714 type u struct { 715 sliceType 716 u uncommonType 717 } 718 return &(*u)(unsafe.Pointer(t)).u 719 case Array: 720 type u struct { 721 arrayType 722 u uncommonType 723 } 724 return &(*u)(unsafe.Pointer(t)).u 725 case Chan: 726 type u struct { 727 chanType 728 u uncommonType 729 } 730 return &(*u)(unsafe.Pointer(t)).u 731 case Map: 732 type u struct { 733 mapType 734 u uncommonType 735 } 736 return &(*u)(unsafe.Pointer(t)).u 737 case Interface: 738 type u struct { 739 interfaceType 740 u uncommonType 741 } 742 return &(*u)(unsafe.Pointer(t)).u 743 default: 744 type u struct { 745 rtype 746 u uncommonType 747 } 748 return &(*u)(unsafe.Pointer(t)).u 749 } 750 } 751 752 func (t *rtype) String() string { 753 s := t.nameOff(t.str).name() 754 if t.tflag&tflagExtraStar != 0 { 755 return s[1:] 756 } 757 return s 758 } 759 760 func (t *rtype) Size() uintptr { return t.size } 761 762 func (t *rtype) Bits() int { 763 if t 
== nil { 764 panic("reflect: Bits of nil Type") 765 } 766 k := t.Kind() 767 if k < Int || k > Complex128 { 768 panic("reflect: Bits of non-arithmetic Type " + t.String()) 769 } 770 return int(t.size) * 8 771 } 772 773 func (t *rtype) Align() int { return int(t.align) } 774 775 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 776 777 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 778 779 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 } 780 781 func (t *rtype) common() *rtype { return t } 782 783 var methodCache struct { 784 sync.RWMutex 785 m map[*rtype][]method 786 } 787 788 func (t *rtype) exportedMethods() []method { 789 methodCache.RLock() 790 methods, found := methodCache.m[t] 791 methodCache.RUnlock() 792 793 if found { 794 return methods 795 } 796 797 ut := t.uncommon() 798 if ut == nil { 799 return nil 800 } 801 allm := ut.methods() 802 allExported := true 803 for _, m := range allm { 804 name := t.nameOff(m.name) 805 if !name.isExported() { 806 allExported = false 807 break 808 } 809 } 810 if allExported { 811 methods = allm 812 } else { 813 methods = make([]method, 0, len(allm)) 814 for _, m := range allm { 815 name := t.nameOff(m.name) 816 if name.isExported() { 817 methods = append(methods, m) 818 } 819 } 820 methods = methods[:len(methods):len(methods)] 821 } 822 823 methodCache.Lock() 824 if methodCache.m == nil { 825 methodCache.m = make(map[*rtype][]method) 826 } 827 methodCache.m[t] = methods 828 methodCache.Unlock() 829 830 return methods 831 } 832 833 func (t *rtype) NumMethod() int { 834 if t.Kind() == Interface { 835 tt := (*interfaceType)(unsafe.Pointer(t)) 836 return tt.NumMethod() 837 } 838 if t.tflag&tflagUncommon == 0 { 839 return 0 // avoid methodCache lock in zero case 840 } 841 return len(t.exportedMethods()) 842 } 843 844 func (t *rtype) Method(i int) (m Method) { 845 if t.Kind() == Interface { 846 tt := (*interfaceType)(unsafe.Pointer(t)) 847 return tt.Method(i) 848 } 849 methods := 
t.exportedMethods() 850 if i < 0 || i >= len(methods) { 851 panic("reflect: Method index out of range") 852 } 853 p := methods[i] 854 pname := t.nameOff(p.name) 855 m.Name = pname.name() 856 fl := flag(Func) 857 mtyp := t.typeOff(p.mtyp) 858 ft := (*funcType)(unsafe.Pointer(mtyp)) 859 in := make([]Type, 0, 1+len(ft.in())) 860 in = append(in, t) 861 for _, arg := range ft.in() { 862 in = append(in, arg) 863 } 864 out := make([]Type, 0, len(ft.out())) 865 for _, ret := range ft.out() { 866 out = append(out, ret) 867 } 868 mt := FuncOf(in, out, ft.IsVariadic()) 869 m.Type = mt 870 tfn := t.textOff(p.tfn) 871 fn := unsafe.Pointer(&tfn) 872 m.Func = Value{mt.(*rtype), fn, fl} 873 874 m.Index = i 875 return m 876 } 877 878 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 879 if t.Kind() == Interface { 880 tt := (*interfaceType)(unsafe.Pointer(t)) 881 return tt.MethodByName(name) 882 } 883 ut := t.uncommon() 884 if ut == nil { 885 return Method{}, false 886 } 887 utmethods := ut.methods() 888 for i := 0; i < int(ut.mcount); i++ { 889 p := utmethods[i] 890 pname := t.nameOff(p.name) 891 if pname.isExported() && pname.name() == name { 892 return t.Method(i), true 893 } 894 } 895 return Method{}, false 896 } 897 898 func (t *rtype) PkgPath() string { 899 if t.tflag&tflagNamed == 0 { 900 return "" 901 } 902 ut := t.uncommon() 903 if ut == nil { 904 return "" 905 } 906 return t.nameOff(ut.pkgPath).name() 907 } 908 909 func hasPrefix(s, prefix string) bool { 910 return len(s) >= len(prefix) && s[:len(prefix)] == prefix 911 } 912 913 func (t *rtype) Name() string { 914 if t.tflag&tflagNamed == 0 { 915 return "" 916 } 917 s := t.String() 918 i := len(s) - 1 919 for i >= 0 { 920 if s[i] == '.' 
{ 921 break 922 } 923 i-- 924 } 925 return s[i+1:] 926 } 927 928 func (t *rtype) ChanDir() ChanDir { 929 if t.Kind() != Chan { 930 panic("reflect: ChanDir of non-chan type") 931 } 932 tt := (*chanType)(unsafe.Pointer(t)) 933 return ChanDir(tt.dir) 934 } 935 936 func (t *rtype) IsVariadic() bool { 937 if t.Kind() != Func { 938 panic("reflect: IsVariadic of non-func type") 939 } 940 tt := (*funcType)(unsafe.Pointer(t)) 941 return tt.outCount&(1<<15) != 0 942 } 943 944 func (t *rtype) Elem() Type { 945 switch t.Kind() { 946 case Array: 947 tt := (*arrayType)(unsafe.Pointer(t)) 948 return toType(tt.elem) 949 case Chan: 950 tt := (*chanType)(unsafe.Pointer(t)) 951 return toType(tt.elem) 952 case Map: 953 tt := (*mapType)(unsafe.Pointer(t)) 954 return toType(tt.elem) 955 case Ptr: 956 tt := (*ptrType)(unsafe.Pointer(t)) 957 return toType(tt.elem) 958 case Slice: 959 tt := (*sliceType)(unsafe.Pointer(t)) 960 return toType(tt.elem) 961 } 962 panic("reflect: Elem of invalid type") 963 } 964 965 func (t *rtype) Field(i int) StructField { 966 if t.Kind() != Struct { 967 panic("reflect: Field of non-struct type") 968 } 969 tt := (*structType)(unsafe.Pointer(t)) 970 return tt.Field(i) 971 } 972 973 func (t *rtype) FieldByIndex(index []int) StructField { 974 if t.Kind() != Struct { 975 panic("reflect: FieldByIndex of non-struct type") 976 } 977 tt := (*structType)(unsafe.Pointer(t)) 978 return tt.FieldByIndex(index) 979 } 980 981 func (t *rtype) FieldByName(name string) (StructField, bool) { 982 if t.Kind() != Struct { 983 panic("reflect: FieldByName of non-struct type") 984 } 985 tt := (*structType)(unsafe.Pointer(t)) 986 return tt.FieldByName(name) 987 } 988 989 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 990 if t.Kind() != Struct { 991 panic("reflect: FieldByNameFunc of non-struct type") 992 } 993 tt := (*structType)(unsafe.Pointer(t)) 994 return tt.FieldByNameFunc(match) 995 } 996 997 func (t *rtype) In(i int) Type { 998 if t.Kind() != Func 
{ 999 panic("reflect: In of non-func type") 1000 } 1001 tt := (*funcType)(unsafe.Pointer(t)) 1002 return toType(tt.in()[i]) 1003 } 1004 1005 func (t *rtype) Key() Type { 1006 if t.Kind() != Map { 1007 panic("reflect: Key of non-map type") 1008 } 1009 tt := (*mapType)(unsafe.Pointer(t)) 1010 return toType(tt.key) 1011 } 1012 1013 func (t *rtype) Len() int { 1014 if t.Kind() != Array { 1015 panic("reflect: Len of non-array type") 1016 } 1017 tt := (*arrayType)(unsafe.Pointer(t)) 1018 return int(tt.len) 1019 } 1020 1021 func (t *rtype) NumField() int { 1022 if t.Kind() != Struct { 1023 panic("reflect: NumField of non-struct type") 1024 } 1025 tt := (*structType)(unsafe.Pointer(t)) 1026 return len(tt.fields) 1027 } 1028 1029 func (t *rtype) NumIn() int { 1030 if t.Kind() != Func { 1031 panic("reflect: NumIn of non-func type") 1032 } 1033 tt := (*funcType)(unsafe.Pointer(t)) 1034 return int(tt.inCount) 1035 } 1036 1037 func (t *rtype) NumOut() int { 1038 if t.Kind() != Func { 1039 panic("reflect: NumOut of non-func type") 1040 } 1041 tt := (*funcType)(unsafe.Pointer(t)) 1042 return len(tt.out()) 1043 } 1044 1045 func (t *rtype) Out(i int) Type { 1046 if t.Kind() != Func { 1047 panic("reflect: Out of non-func type") 1048 } 1049 tt := (*funcType)(unsafe.Pointer(t)) 1050 return toType(tt.out()[i]) 1051 } 1052 1053 func (t *funcType) in() []*rtype { 1054 uadd := unsafe.Sizeof(*t) 1055 if t.tflag&tflagUncommon != 0 { 1056 uadd += unsafe.Sizeof(uncommonType{}) 1057 } 1058 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount] 1059 } 1060 1061 func (t *funcType) out() []*rtype { 1062 uadd := unsafe.Sizeof(*t) 1063 if t.tflag&tflagUncommon != 0 { 1064 uadd += unsafe.Sizeof(uncommonType{}) 1065 } 1066 outCount := t.outCount & (1<<15 - 1) 1067 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount] 1068 } 1069 1070 func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { 1071 return unsafe.Pointer(uintptr(p) + x) 1072 } 1073 1074 func (d 
ChanDir) String() string { 1075 switch d { 1076 case SendDir: 1077 return "chan<-" 1078 case RecvDir: 1079 return "<-chan" 1080 case BothDir: 1081 return "chan" 1082 } 1083 return "ChanDir" + strconv.Itoa(int(d)) 1084 } 1085 1086 // Method returns the i'th method in the type's method set. 1087 func (t *interfaceType) Method(i int) (m Method) { 1088 if i < 0 || i >= len(t.methods) { 1089 return 1090 } 1091 p := &t.methods[i] 1092 pname := t.nameOff(p.name) 1093 m.Name = pname.name() 1094 if !pname.isExported() { 1095 m.PkgPath = pname.pkgPath() 1096 if m.PkgPath == "" { 1097 m.PkgPath = t.pkgPath.name() 1098 } 1099 } 1100 m.Type = toType(t.typeOff(p.typ)) 1101 m.Index = i 1102 return 1103 } 1104 1105 // NumMethod returns the number of interface methods in the type's method set. 1106 func (t *interfaceType) NumMethod() int { return len(t.methods) } 1107 1108 // MethodByName method with the given name in the type's method set. 1109 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { 1110 if t == nil { 1111 return 1112 } 1113 var p *imethod 1114 for i := range t.methods { 1115 p = &t.methods[i] 1116 if t.nameOff(p.name).name() == name { 1117 return t.Method(i), true 1118 } 1119 } 1120 return 1121 } 1122 1123 // A StructField describes a single field in a struct. 1124 type StructField struct { 1125 // Name is the field name. 1126 Name string 1127 // PkgPath is the package path that qualifies a lower case (unexported) 1128 // field name. It is empty for upper case (exported) field names. 1129 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 1130 PkgPath string 1131 1132 Type Type // field type 1133 Tag StructTag // field tag string 1134 Offset uintptr // offset within struct, in bytes 1135 Index []int // index sequence for Type.FieldByIndex 1136 Anonymous bool // is an embedded field 1137 } 1138 1139 // A StructTag is the tag string in a struct field. 
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				// Skip over the escaped character so an escaped
				// quote does not terminate the scan.
				i++
			}
			i++
		}
		if i >= len(tag) {
			// Unterminated quoted value: malformed tag.
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
// It panics if i is out of range.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = p.name.name()
	f.Anonymous = p.anon()
	if !p.name.isExported() {
		f.PkgPath = p.name.pkgPath()
		if f.PkgPath == "" {
			// The field name carries no package of its own;
			// fall back to the struct type's package.
			f.PkgPath = t.pkgPath.name()
		}
	}
	if tag := p.name.tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			// Step through an embedded pointer-to-struct so the
			// next index selects a field of the pointed-to struct.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType // struct type to scan
	index []int       // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		// Swap the queues, reusing current's backing array for next.
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for anonymous field) type for field f.
				fname := f.name.name()
				var ntyp *rtype
				if f.anon() {
					// Anonymous field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			// A unique match at this depth shadows anything deeper.
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without anonymous fields.
	hasAnon := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if tf.name.name() == name {
				return t.Field(i), true
			}
			if tf.anon() {
				hasAnon = true
			}
		}
	}
	if !hasAnon {
		// No embedded fields, so no deeper level to search.
		return
	}
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}

// ptrMap is the cache for PtrTo.
var ptrMap struct {
	sync.RWMutex
	m map[*rtype]*ptrType
}

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}

// ptrTo returns the *rtype for *t, consulting first the type data
// linked into the binary, then the ptrMap cache, and finally
// constructing a fresh ptrType if no existing description is found.
func (t *rtype) ptrTo() *rtype {
	if t.ptrToThis != 0 {
		// The binary already links a pointer type for t.
		return t.typeOff(t.ptrToThis)
	}

	// Check the cache.
	ptrMap.RLock()
	if m := ptrMap.m; m != nil {
		if p := m[t]; p != nil {
			ptrMap.RUnlock()
			return &p.rtype
		}
	}
	ptrMap.RUnlock()

	ptrMap.Lock()
	if ptrMap.m == nil {
		ptrMap.m = make(map[*rtype]*ptrType)
	}
	p := ptrMap.m[t]
	if p != nil {
		// some other goroutine won the race and created it
		ptrMap.Unlock()
		return &p.rtype
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p = (*ptrType)(unsafe.Pointer(tt))
		if p.elem == t {
			ptrMap.m[t] = p
			ptrMap.Unlock()
			return &p.rtype
		}
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.str = resolveReflectName(newName(s, "", "", false))
	pp.ptrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.hash = fnv1(t.hash, '*')

	pp.elem = t

	ptrMap.m[t] = &pp
	ptrMap.Unlock()
	return &pp.rtype
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*rtype), t)
}

// AssignableTo reports whether a value of type t is assignable to type u.
// It panics if u is nil.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*rtype)
	return directlyAssignable(uu, t) || implements(uu, t)
}

// ConvertibleTo reports whether a value of type t is convertible to type u.
// It panics if u is nil.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*rtype)
	return convertOp(uu, t) != nil
}

// Comparable reports whether values of this type are comparable;
// a type is comparable exactly when the runtime supplies an equal
// function for it.
func (t *rtype) Comparable() bool {
	return t.alg != nil && t.alg.equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all.
		return false
	}
	i := 0
	vmethods := v.methods()
	for j := 0; j < int(v.mcount); j++ {
		tm := &t.methods[i]
		vm := vmethods[j]
		if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must be unnamed
	// and they must have the same kind.
	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
		return false
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether T and V are identical types.
// With cmpTags set this is exact identity; without it, struct tags
// are ignored and only names, kinds and underlying structure count.
func haveIdenticalType(T, V Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if T.Name() != V.Name() || T.Kind() != V.Kind() {
		return false
	}

	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, optionally comparing struct field tags.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// outCount's top bit carries the variadic flag, so comparing
		// the raw counts also compares variadicity.
		if t.outCount != v.outCount || t.inCount != v.inCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.name.tag() != vf.name.tag() {
				return false
			}
			// offsetAnon packs both the field offset and the
			// embedded-field bit; both must agree.
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
			if !tf.name.isExported() {
				// Unexported fields are only identical when they
				// come from the same package.
				tp := tf.name.pkgPath()
				if tp == "" {
					tp = t.pkgPath.name()
				}
				vp := vf.name.pkgPath()
				if vp == "" {
					vp = v.pkgPath.name()
				}
				if tp != vp {
					return false
				}
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff resolves an offset from a typelinks section to its *rtype.
func rtypeOff(section unsafe.Pointer, off int32) *rtype {
	return (*rtype)(add(section, uintptr(off)))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*rtype {
	sections, offset := typelinks()
	var ret []*rtype

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)/2 // avoid overflow when computing h
			// i ≤ h < j
			if !(rtypeOff(section, offs[h]).String() >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if typ.String() != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache struct {
	sync.RWMutex
	m map[cacheKey]*rtype
}

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr
}

// cacheGet looks for a type under the key k in the lookupCache.
// If it finds one, it returns that type.
// If not, it returns nil with the cache locked.
// The caller is expected to use cachePut to unlock the cache.
func cacheGet(k cacheKey) Type {
	lookupCache.RLock()
	t := lookupCache.m[k]
	lookupCache.RUnlock()
	if t != nil {
		return t
	}

	// Not found under the read lock; take the write lock and
	// re-check in case another goroutine inserted it meanwhile.
	lookupCache.Lock()
	t = lookupCache.m[k]
	if t != nil {
		lookupCache.Unlock()
		return t
	}

	if lookupCache.m == nil {
		lookupCache.m = make(map[cacheKey]*rtype)
	}

	// NOTE: returning nil leaves lookupCache locked on purpose.
	return nil
}

// cachePut stores the given type in the cache, unlocks the cache,
// and returns the type. It is expected that the cache is locked
// because cacheGet returned nil.
func cachePut(k cacheKey, t *rtype) Type {
	lookupCache.m[k] = t
	lookupCache.Unlock()
	return t
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.RWMutex
	m map[uint32][]*rtype // keyed by hash calculated in FuncOf
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	// On a miss, cacheGet returns nil with lookupCache left locked,
	// so every exit path below must unlock it (including panics).
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch := cacheGet(ckey); ch != nil {
		return ch
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		lookupCache.Unlock()
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		lookupCache.Unlock()
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + typ.String()
	case RecvDir:
		s = "<-chan " + typ.String()
	case BothDir:
		s = "chan " + typ.String()
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.elem == typ && ch.dir == uintptr(dir) {
			return cachePut(ckey, tt)
		}
	}

	// Make a channel type.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = 0
	ch.dir = uintptr(dir)
	ch.str = resolveReflectName(newName(s, "", "", false))
	ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.elem = typ

	return cachePut(ckey, &ch.rtype)
}

func ismapkey(*rtype) bool // implemented in runtime

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if !ismapkey(ktyp) {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	// On a miss, cacheGet returns nil with lookupCache left locked;
	// cachePut below releases it.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt := cacheGet(ckey); mt != nil {
		return mt
	}

	// Look in known types.
	s := "map[" + ktyp.String() + "]" + etyp.String()
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.key == ktyp && mt.elem == etyp {
			return cachePut(ckey, tt)
		}
	}

	// Make a map type.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.str = resolveReflectName(newName(s, "", "", false))
	mt.tflag = 0
	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.key = ktyp
	mt.elem = etyp
	mt.bucket = bucketOf(ktyp, etyp)
	// Oversized keys/values are stored indirectly (as pointers)
	// inside the bucket; see maxKeySize/maxValSize below.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.indirectkey = 1
	} else {
		mt.keysize = uint8(ktyp.size)
		mt.indirectkey = 0
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.indirectvalue = 1
	} else {
		mt.valuesize = uint8(etyp.size)
		mt.indirectvalue = 0
	}
	mt.bucketsize = uint16(mt.bucket.size)
	mt.reflexivekey = isReflexive(ktyp)
	mt.needkeyupdate = needKeyUpdate(ktyp)
	mt.ptrToThis = 0

	return cachePut(ckey, &mt.rtype)
}

// The funcTypeFixedN types provide a funcType followed by inline
// storage for up to N argument/result *rtypes, so FuncOf can allocate
// the header and the arg array together.
type funcTypeFixed4 struct {
	funcType
	args [4]*rtype
}
type funcTypeFixed8 struct {
	funcType
	args [8]*rtype
}
type funcTypeFixed16 struct {
	funcType
	args [16]*rtype
}
type funcTypeFixed32 struct {
	funcType
	args [32]*rtype
}
type funcTypeFixed64 struct {
	funcType
	args [64]*rtype
}
type funcTypeFixed128 struct {
	funcType
	args [128]*rtype
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	// Pick the smallest fixed-size backing type that fits all args,
	// so the arg array is laid out directly after the funcType.
	var ft *funcType
	var args []*rtype
	switch {
	case n <= 4:
		fixed := new(funcTypeFixed4)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 8:
		fixed := new(funcTypeFixed8)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 16:
		fixed := new(funcTypeFixed16)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 32:
		fixed := new(funcTypeFixed32)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 64:
		fixed := new(funcTypeFixed64)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 128:
		fixed := new(funcTypeFixed128)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	default:
		panic("reflect.FuncOf: too many arguments")
	}
	*ft = *prototype

	// Build a hash and minimally populate ft.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if len(args) > 50 {
		panic("reflect.FuncOf does not support more than 50 arguments")
	}
	ft.tflag = 0
	ft.hash = hash
	ft.inCount = uint16(len(in))
	ft.outCount = uint16(len(out))
	if variadic {
		// The top bit of outCount marks a variadic function.
		ft.outCount |= 1 << 15
	}

	// Look in cache.
	funcLookupCache.RLock()
	for _, t := range funcLookupCache.m[hash] {
		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
			funcLookupCache.RUnlock()
			return t
		}
	}
	funcLookupCache.RUnlock()

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if funcLookupCache.m == nil {
		funcLookupCache.m = make(map[uint32][]*rtype)
	}
	for _, t := range funcLookupCache.m[hash] {
		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
			return t
		}
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
			funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt)
			return tt
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.str = resolveReflectName(newName(str, "", "", false))
	ft.ptrToThis = 0
	funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)

	return &ft.rtype
}

// funcStr builds a string representation of a funcType.
2095 func funcStr(ft *funcType) string { 2096 repr := make([]byte, 0, 64) 2097 repr = append(repr, "func("...) 2098 for i, t := range ft.in() { 2099 if i > 0 { 2100 repr = append(repr, ", "...) 2101 } 2102 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2103 repr = append(repr, "..."...) 2104 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2105 } else { 2106 repr = append(repr, t.String()...) 2107 } 2108 } 2109 repr = append(repr, ')') 2110 out := ft.out() 2111 if len(out) == 1 { 2112 repr = append(repr, ' ') 2113 } else if len(out) > 1 { 2114 repr = append(repr, " ("...) 2115 } 2116 for i, t := range out { 2117 if i > 0 { 2118 repr = append(repr, ", "...) 2119 } 2120 repr = append(repr, t.String()...) 2121 } 2122 if len(out) > 1 { 2123 repr = append(repr, ')') 2124 } 2125 return string(repr) 2126 } 2127 2128 // isReflexive reports whether the == operation on the type is reflexive. 2129 // That is, x == x for all values x of type t. 2130 func isReflexive(t *rtype) bool { 2131 switch t.Kind() { 2132 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2133 return true 2134 case Float32, Float64, Complex64, Complex128, Interface: 2135 return false 2136 case Array: 2137 tt := (*arrayType)(unsafe.Pointer(t)) 2138 return isReflexive(tt.elem) 2139 case Struct: 2140 tt := (*structType)(unsafe.Pointer(t)) 2141 for _, f := range tt.fields { 2142 if !isReflexive(f.typ) { 2143 return false 2144 } 2145 } 2146 return true 2147 default: 2148 // Func, Map, Slice, Invalid 2149 panic("isReflexive called on non-key type " + t.String()) 2150 } 2151 } 2152 2153 // needKeyUpdate reports whether map overwrites require the key to be copied. 
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		// An array key needs updating exactly when its element type does.
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		// A struct key needs updating if any field does.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}

// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8
	maxKeySize uintptr = 128
	maxValSize uintptr = 128
)

// bucketOf constructs an rtype describing the layout of one map bucket
// for keys of type ktyp and values of type etyp. Only GC-relevant fields
// (size, align, kind, ptrdata, gcdata) plus a debug name are filled in.
// Keys/values larger than maxKeySize/maxValSize are stored indirectly,
// as pointers, mirroring the runtime's map implementation.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// See comment on hmap.overflow in ../runtime/hashmap.go.
	var kind uint8
	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
		kind = kindNoPointers
	}

	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr
	var overflowPad uintptr

	// On NaCl, pad if needed to make overflow end at the proper struct alignment.
	// On other systems, align > ptrSize is not possible.
	if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
		overflowPad = ptrSize
	}
	// Bucket layout: bucketSize tophash bytes, then bucketSize keys,
	// bucketSize values, optional padding, and one overflow pointer.
	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
		panic("reflect: bad size computation in MapOf")
	}

	if kind != kindNoPointers {
		// Build the bucket's pointer bitmap word by word.
		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
		mask := make([]byte, (nptr+7)/8)
		base := bucketSize / ptrSize // skip the tophash words

		if ktyp.kind&kindNoPointers == 0 {
			if ktyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
			for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
				if (kmask[i/8]>>(i%8))&1 != 0 {
					// Replicate the key's pointer bit into each of the
					// bucketSize key slots.
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*ktyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * ktyp.size / ptrSize

		if etyp.kind&kindNoPointers == 0 {
			if etyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
			for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
				if (emask[i/8]>>(i%8))&1 != 0 {
					// Replicate the value's pointer bit into each of the
					// bucketSize value slots.
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*etyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * etyp.size / ptrSize
		base += overflowPad / ptrSize

		// The trailing overflow pointer.
		word := base
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:   ptrSize,
		size:    size,
		kind:    kind,
		ptrdata: ptrdata,
		gcdata:  gcdata,
	}
	if overflowPad > 0 {
		b.align = 8
	}
	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
	b.str = resolveReflectName(newName(s, "", "", false))
	return b
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice := cacheGet(ckey); slice != nil {
		return slice
	}

	// Look in known types.
	s := "[]" + typ.String()
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.elem == typ {
			return cachePut(ckey, tt)
		}
	}

	// Make a slice type: copy the prototype []unsafe.Pointer type
	// descriptor and patch in the new element type.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.tflag = 0
	slice.str = resolveReflectName(newName(s, "", "", false))
	slice.hash = fnv1(typ.hash, '[')
	slice.elem = typ
	slice.ptrToThis = 0

	return cachePut(ckey, &slice.rtype)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
2324 var structLookupCache struct { 2325 sync.RWMutex 2326 m map[uint32][]interface { 2327 common() *rtype 2328 } // keyed by hash calculated in StructOf 2329 } 2330 2331 type structTypeUncommon struct { 2332 structType 2333 u uncommonType 2334 } 2335 2336 // A *rtype representing a struct is followed directly in memory by an 2337 // array of method objects representing the methods attached to the 2338 // struct. To get the same layout for a run time generated type, we 2339 // need an array directly following the uncommonType memory. The types 2340 // structTypeFixed4, ...structTypeFixedN are used to do this. 2341 // 2342 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2343 2344 // TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs 2345 // have no methods, they could be defined at runtime using the StructOf 2346 // function. 2347 2348 type structTypeFixed4 struct { 2349 structType 2350 u uncommonType 2351 m [4]method 2352 } 2353 2354 type structTypeFixed8 struct { 2355 structType 2356 u uncommonType 2357 m [8]method 2358 } 2359 2360 type structTypeFixed16 struct { 2361 structType 2362 u uncommonType 2363 m [16]method 2364 } 2365 2366 type structTypeFixed32 struct { 2367 structType 2368 u uncommonType 2369 m [32]method 2370 } 2371 2372 // StructOf returns the struct type containing fields. 2373 // The Offset and Index fields are ignored and computed as they would be 2374 // by the compiler. 2375 // 2376 // StructOf currently does not generate wrapper methods for embedded fields. 2377 // This limitation may be lifted in a future version. 2378 func StructOf(fields []StructField) Type { 2379 var ( 2380 hash = fnv1(0, []byte("struct {")...) 
2381 size uintptr 2382 typalign uint8 2383 comparable = true 2384 hashable = true 2385 methods []method 2386 2387 fs = make([]structField, len(fields)) 2388 repr = make([]byte, 0, 64) 2389 fset = map[string]struct{}{} // fields' names 2390 2391 hasPtr = false // records whether at least one struct-field is a pointer 2392 hasGCProg = false // records whether a struct-field type has a GCProg 2393 ) 2394 2395 lastzero := uintptr(0) 2396 repr = append(repr, "struct {"...) 2397 for i, field := range fields { 2398 if field.Name == "" { 2399 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2400 } 2401 if field.Type == nil { 2402 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2403 } 2404 f := runtimeStructField(field) 2405 ft := f.typ 2406 if ft.kind&kindGCProg != 0 { 2407 hasGCProg = true 2408 } 2409 if ft.pointers() { 2410 hasPtr = true 2411 } 2412 2413 // Update string and hash 2414 name := f.name.name() 2415 hash = fnv1(hash, []byte(name)...) 2416 repr = append(repr, (" " + name)...) 
2417 if f.anon() { 2418 // Embedded field 2419 if f.typ.Kind() == Ptr { 2420 // Embedded ** and *interface{} are illegal 2421 elem := ft.Elem() 2422 if k := elem.Kind(); k == Ptr || k == Interface { 2423 panic("reflect.StructOf: illegal anonymous field type " + ft.String()) 2424 } 2425 } 2426 2427 switch f.typ.Kind() { 2428 case Interface: 2429 ift := (*interfaceType)(unsafe.Pointer(ft)) 2430 for im, m := range ift.methods { 2431 if ift.nameOff(m.name).pkgPath() != "" { 2432 // TODO(sbinet) 2433 panic("reflect: embedded interface with unexported method(s) not implemented") 2434 } 2435 2436 var ( 2437 mtyp = ift.typeOff(m.typ) 2438 ifield = i 2439 imethod = im 2440 ifn Value 2441 tfn Value 2442 ) 2443 2444 if ft.kind&kindDirectIface != 0 { 2445 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2446 var args []Value 2447 var recv = in[0] 2448 if len(in) > 1 { 2449 args = in[1:] 2450 } 2451 return recv.Field(ifield).Method(imethod).Call(args) 2452 }) 2453 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2454 var args []Value 2455 var recv = in[0] 2456 if len(in) > 1 { 2457 args = in[1:] 2458 } 2459 return recv.Field(ifield).Method(imethod).Call(args) 2460 }) 2461 } else { 2462 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2463 var args []Value 2464 var recv = in[0] 2465 if len(in) > 1 { 2466 args = in[1:] 2467 } 2468 return recv.Field(ifield).Method(imethod).Call(args) 2469 }) 2470 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2471 var args []Value 2472 var recv = Indirect(in[0]) 2473 if len(in) > 1 { 2474 args = in[1:] 2475 } 2476 return recv.Field(ifield).Method(imethod).Call(args) 2477 }) 2478 } 2479 2480 methods = append(methods, method{ 2481 name: resolveReflectName(ift.nameOff(m.name)), 2482 mtyp: resolveReflectType(mtyp), 2483 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2484 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2485 }) 2486 } 2487 case Ptr: 2488 ptr := (*ptrType)(unsafe.Pointer(ft)) 2489 if unt := ptr.uncommon(); unt != nil { 2490 for _, m := 
range unt.methods() { 2491 mname := ptr.nameOff(m.name) 2492 if mname.pkgPath() != "" { 2493 // TODO(sbinet) 2494 panic("reflect: embedded interface with unexported method(s) not implemented") 2495 } 2496 methods = append(methods, method{ 2497 name: resolveReflectName(mname), 2498 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2499 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2500 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2501 }) 2502 } 2503 } 2504 if unt := ptr.elem.uncommon(); unt != nil { 2505 for _, m := range unt.methods() { 2506 mname := ptr.nameOff(m.name) 2507 if mname.pkgPath() != "" { 2508 // TODO(sbinet) 2509 panic("reflect: embedded interface with unexported method(s) not implemented") 2510 } 2511 methods = append(methods, method{ 2512 name: resolveReflectName(mname), 2513 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2514 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2515 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2516 }) 2517 } 2518 } 2519 default: 2520 if unt := ft.uncommon(); unt != nil { 2521 for _, m := range unt.methods() { 2522 mname := ft.nameOff(m.name) 2523 if mname.pkgPath() != "" { 2524 // TODO(sbinet) 2525 panic("reflect: embedded interface with unexported method(s) not implemented") 2526 } 2527 methods = append(methods, method{ 2528 name: resolveReflectName(mname), 2529 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2530 ifn: resolveReflectText(ft.textOff(m.ifn)), 2531 tfn: resolveReflectText(ft.textOff(m.tfn)), 2532 }) 2533 2534 } 2535 } 2536 } 2537 } 2538 if _, dup := fset[name]; dup { 2539 panic("reflect.StructOf: duplicate field " + name) 2540 } 2541 fset[name] = struct{}{} 2542 2543 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2544 2545 repr = append(repr, (" " + ft.String())...) 2546 if f.name.tagLen() > 0 { 2547 hash = fnv1(hash, []byte(f.name.tag())...) 2548 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 
2549 } 2550 if i < len(fields)-1 { 2551 repr = append(repr, ';') 2552 } 2553 2554 comparable = comparable && (ft.alg.equal != nil) 2555 hashable = hashable && (ft.alg.hash != nil) 2556 2557 offset := align(size, uintptr(ft.align)) 2558 if ft.align > typalign { 2559 typalign = ft.align 2560 } 2561 size = offset + ft.size 2562 f.offsetAnon |= offset << 1 2563 2564 if ft.size == 0 { 2565 lastzero = size 2566 } 2567 2568 fs[i] = f 2569 } 2570 2571 if size > 0 && lastzero == size { 2572 // This is a non-zero sized struct that ends in a 2573 // zero-sized field. We add an extra byte of padding, 2574 // to ensure that taking the address of the final 2575 // zero-sized field can't manufacture a pointer to the 2576 // next object in the heap. See issue 9401. 2577 size++ 2578 } 2579 2580 var typ *structType 2581 var ut *uncommonType 2582 var typPin interface { 2583 common() *rtype 2584 } // structTypeFixedN 2585 2586 switch { 2587 case len(methods) == 0: 2588 t := new(structTypeUncommon) 2589 typ = &t.structType 2590 ut = &t.u 2591 typPin = t 2592 case len(methods) <= 4: 2593 t := new(structTypeFixed4) 2594 typ = &t.structType 2595 ut = &t.u 2596 copy(t.m[:], methods) 2597 typPin = t 2598 case len(methods) <= 8: 2599 t := new(structTypeFixed8) 2600 typ = &t.structType 2601 ut = &t.u 2602 copy(t.m[:], methods) 2603 typPin = t 2604 case len(methods) <= 16: 2605 t := new(structTypeFixed16) 2606 typ = &t.structType 2607 ut = &t.u 2608 copy(t.m[:], methods) 2609 typPin = t 2610 case len(methods) <= 32: 2611 t := new(structTypeFixed32) 2612 typ = &t.structType 2613 ut = &t.u 2614 copy(t.m[:], methods) 2615 typPin = t 2616 default: 2617 panic("reflect.StructOf: too many methods") 2618 } 2619 ut.mcount = uint16(len(methods)) 2620 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2621 2622 if len(fs) > 0 { 2623 repr = append(repr, ' ') 2624 } 2625 repr = append(repr, '}') 2626 hash = fnv1(hash, '}') 2627 str := string(repr) 2628 2629 // Round the size up to be a multiple of the 
alignment. 2630 size = align(size, uintptr(typalign)) 2631 2632 // Make the struct type. 2633 var istruct interface{} = struct{}{} 2634 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2635 *typ = *prototype 2636 typ.fields = fs 2637 2638 // Look in cache 2639 structLookupCache.RLock() 2640 for _, st := range structLookupCache.m[hash] { 2641 t := st.common() 2642 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2643 structLookupCache.RUnlock() 2644 return t 2645 } 2646 } 2647 structLookupCache.RUnlock() 2648 2649 // not in cache, lock and retry 2650 structLookupCache.Lock() 2651 defer structLookupCache.Unlock() 2652 if structLookupCache.m == nil { 2653 structLookupCache.m = make(map[uint32][]interface { 2654 common() *rtype 2655 }) 2656 } 2657 for _, st := range structLookupCache.m[hash] { 2658 t := st.common() 2659 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2660 return t 2661 } 2662 } 2663 2664 // Look in known types. 2665 for _, t := range typesByString(str) { 2666 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2667 // even if 't' wasn't a structType with methods, we should be ok 2668 // as the 'u uncommonType' field won't be accessed except when 2669 // tflag&tflagUncommon is set. 
2670 structLookupCache.m[hash] = append(structLookupCache.m[hash], t) 2671 return t 2672 } 2673 } 2674 2675 typ.str = resolveReflectName(newName(str, "", "", false)) 2676 typ.tflag = 0 2677 typ.hash = hash 2678 typ.size = size 2679 typ.align = typalign 2680 typ.fieldAlign = typalign 2681 typ.ptrToThis = 0 2682 if len(methods) > 0 { 2683 typ.tflag |= tflagUncommon 2684 } 2685 if !hasPtr { 2686 typ.kind |= kindNoPointers 2687 } else { 2688 typ.kind &^= kindNoPointers 2689 } 2690 2691 if hasGCProg { 2692 lastPtrField := 0 2693 for i, ft := range fs { 2694 if ft.typ.pointers() { 2695 lastPtrField = i 2696 } 2697 } 2698 prog := []byte{0, 0, 0, 0} // will be length of prog 2699 for i, ft := range fs { 2700 if i > lastPtrField { 2701 // gcprog should not include anything for any field after 2702 // the last field that contains pointer data 2703 break 2704 } 2705 // FIXME(sbinet) handle padding, fields smaller than a word 2706 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:] 2707 elemPtrs := ft.typ.ptrdata / ptrSize 2708 switch { 2709 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0: 2710 // Element is small with pointer mask; use as literal bits. 2711 mask := elemGC 2712 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2713 var n uintptr 2714 for n := elemPtrs; n > 120; n -= 120 { 2715 prog = append(prog, 120) 2716 prog = append(prog, mask[:15]...) 2717 mask = mask[15:] 2718 } 2719 prog = append(prog, byte(n)) 2720 prog = append(prog, mask[:(n+7)/8]...) 2721 case ft.typ.kind&kindGCProg != 0: 2722 // Element has GC program; emit one element. 2723 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2724 prog = append(prog, elemProg...) 2725 } 2726 // Pad from ptrdata to size. 2727 elemWords := ft.typ.size / ptrSize 2728 if elemPtrs < elemWords { 2729 // Emit literal 0 bit, then repeat as needed. 
2730 prog = append(prog, 0x01, 0x00) 2731 if elemPtrs+1 < elemWords { 2732 prog = append(prog, 0x81) 2733 prog = appendVarint(prog, elemWords-elemPtrs-1) 2734 } 2735 } 2736 } 2737 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2738 typ.kind |= kindGCProg 2739 typ.gcdata = &prog[0] 2740 } else { 2741 typ.kind &^= kindGCProg 2742 bv := new(bitVector) 2743 addTypeBits(bv, 0, typ.common()) 2744 if len(bv.data) > 0 { 2745 typ.gcdata = &bv.data[0] 2746 } 2747 } 2748 typ.ptrdata = typeptrdata(typ.common()) 2749 typ.alg = new(typeAlg) 2750 if hashable { 2751 typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr { 2752 o := seed 2753 for _, ft := range typ.fields { 2754 pi := unsafe.Pointer(uintptr(p) + ft.offset()) 2755 o = ft.typ.alg.hash(pi, o) 2756 } 2757 return o 2758 } 2759 } 2760 2761 if comparable { 2762 typ.alg.equal = func(p, q unsafe.Pointer) bool { 2763 for _, ft := range typ.fields { 2764 pi := unsafe.Pointer(uintptr(p) + ft.offset()) 2765 qi := unsafe.Pointer(uintptr(q) + ft.offset()) 2766 if !ft.typ.alg.equal(pi, qi) { 2767 return false 2768 } 2769 } 2770 return true 2771 } 2772 } 2773 2774 switch { 2775 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2776 // structs of 1 direct iface type can be direct 2777 typ.kind |= kindDirectIface 2778 default: 2779 typ.kind &^= kindDirectIface 2780 } 2781 2782 structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin) 2783 return &typ.rtype 2784 } 2785 2786 func runtimeStructField(field StructField) structField { 2787 if field.PkgPath != "" { 2788 panic("reflect.StructOf: StructOf does not allow unexported fields") 2789 } 2790 2791 // Best-effort check for misuse. 2792 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through. 
2793 c := field.Name[0] 2794 if 'a' <= c && c <= 'z' || c == '_' { 2795 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2796 } 2797 2798 offsetAnon := uintptr(0) 2799 if field.Anonymous { 2800 offsetAnon |= 1 2801 } 2802 2803 resolveReflectType(field.Type.common()) // install in runtime 2804 return structField{ 2805 name: newName(field.Name, string(field.Tag), "", true), 2806 typ: field.Type.common(), 2807 offsetAnon: offsetAnon, 2808 } 2809 } 2810 2811 // typeptrdata returns the length in bytes of the prefix of t 2812 // containing pointer data. Anything after this offset is scalar data. 2813 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2814 func typeptrdata(t *rtype) uintptr { 2815 if !t.pointers() { 2816 return 0 2817 } 2818 switch t.Kind() { 2819 case Struct: 2820 st := (*structType)(unsafe.Pointer(t)) 2821 // find the last field that has pointers. 2822 field := 0 2823 for i := range st.fields { 2824 ft := st.fields[i].typ 2825 if ft.pointers() { 2826 field = i 2827 } 2828 } 2829 f := st.fields[field] 2830 return f.offset() + f.typ.ptrdata 2831 2832 default: 2833 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2834 } 2835 } 2836 2837 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2838 const maxPtrmaskBytes = 2048 2839 2840 // ArrayOf returns the array type with the given count and element type. 2841 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2842 // 2843 // If the resulting type would be larger than the available address space, 2844 // ArrayOf panics. 2845 func ArrayOf(count int, elem Type) Type { 2846 typ := elem.(*rtype) 2847 // call SliceOf here as it calls cacheGet/cachePut. 2848 // ArrayOf also calls cacheGet/cachePut and thus may modify the state of 2849 // the lookupCache mutex. 2850 slice := SliceOf(elem) 2851 2852 // Look in cache. 
2853 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2854 if array := cacheGet(ckey); array != nil { 2855 return array 2856 } 2857 2858 // Look in known types. 2859 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2860 for _, tt := range typesByString(s) { 2861 array := (*arrayType)(unsafe.Pointer(tt)) 2862 if array.elem == typ { 2863 return cachePut(ckey, tt) 2864 } 2865 } 2866 2867 // Make an array type. 2868 var iarray interface{} = [1]unsafe.Pointer{} 2869 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2870 array := *prototype 2871 array.str = resolveReflectName(newName(s, "", "", false)) 2872 array.hash = fnv1(typ.hash, '[') 2873 for n := uint32(count); n > 0; n >>= 8 { 2874 array.hash = fnv1(array.hash, byte(n)) 2875 } 2876 array.hash = fnv1(array.hash, ']') 2877 array.elem = typ 2878 array.ptrToThis = 0 2879 max := ^uintptr(0) / typ.size 2880 if uintptr(count) > max { 2881 panic("reflect.ArrayOf: array size would exceed virtual address space") 2882 } 2883 array.size = typ.size * uintptr(count) 2884 if count > 0 && typ.ptrdata != 0 { 2885 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2886 } 2887 array.align = typ.align 2888 array.fieldAlign = typ.fieldAlign 2889 array.len = uintptr(count) 2890 array.slice = slice.(*rtype) 2891 2892 array.kind &^= kindNoPointers 2893 switch { 2894 case typ.kind&kindNoPointers != 0 || array.size == 0: 2895 // No pointers. 2896 array.kind |= kindNoPointers 2897 array.gcdata = nil 2898 array.ptrdata = 0 2899 2900 case count == 1: 2901 // In memory, 1-element array looks just like the element. 2902 array.kind |= typ.kind & kindGCProg 2903 array.gcdata = typ.gcdata 2904 array.ptrdata = typ.ptrdata 2905 2906 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2907 // Element is small with pointer mask; array is still small. 2908 // Create direct pointer mask by turning each 1 bit in elem 2909 // into count 1 bits in larger mask. 
2910 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2911 elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2912 elemWords := typ.size / ptrSize 2913 for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ { 2914 if (elemMask[j/8]>>(j%8))&1 != 0 { 2915 for i := uintptr(0); i < array.len; i++ { 2916 k := i*elemWords + j 2917 mask[k/8] |= 1 << (k % 8) 2918 } 2919 } 2920 } 2921 array.gcdata = &mask[0] 2922 2923 default: 2924 // Create program that emits one element 2925 // and then repeats to make the array. 2926 prog := []byte{0, 0, 0, 0} // will be length of prog 2927 elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2928 elemPtrs := typ.ptrdata / ptrSize 2929 if typ.kind&kindGCProg == 0 { 2930 // Element is small with pointer mask; use as literal bits. 2931 mask := elemGC 2932 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2933 var n uintptr 2934 for n = elemPtrs; n > 120; n -= 120 { 2935 prog = append(prog, 120) 2936 prog = append(prog, mask[:15]...) 2937 mask = mask[15:] 2938 } 2939 prog = append(prog, byte(n)) 2940 prog = append(prog, mask[:(n+7)/8]...) 2941 } else { 2942 // Element has GC program; emit one element. 2943 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2944 prog = append(prog, elemProg...) 2945 } 2946 // Pad from ptrdata to size. 2947 elemWords := typ.size / ptrSize 2948 if elemPtrs < elemWords { 2949 // Emit literal 0 bit, then repeat as needed. 2950 prog = append(prog, 0x01, 0x00) 2951 if elemPtrs+1 < elemWords { 2952 prog = append(prog, 0x81) 2953 prog = appendVarint(prog, elemWords-elemPtrs-1) 2954 } 2955 } 2956 // Repeat count-1 times. 
2957 if elemWords < 0x80 { 2958 prog = append(prog, byte(elemWords|0x80)) 2959 } else { 2960 prog = append(prog, 0x80) 2961 prog = appendVarint(prog, elemWords) 2962 } 2963 prog = appendVarint(prog, uintptr(count)-1) 2964 prog = append(prog, 0) 2965 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2966 array.kind |= kindGCProg 2967 array.gcdata = &prog[0] 2968 array.ptrdata = array.size // overestimate but ok; must match program 2969 } 2970 2971 etyp := typ.common() 2972 esize := etyp.Size() 2973 ealg := etyp.alg 2974 2975 array.alg = new(typeAlg) 2976 if ealg.equal != nil { 2977 eequal := ealg.equal 2978 array.alg.equal = func(p, q unsafe.Pointer) bool { 2979 for i := 0; i < count; i++ { 2980 pi := arrayAt(p, i, esize) 2981 qi := arrayAt(q, i, esize) 2982 if !eequal(pi, qi) { 2983 return false 2984 } 2985 2986 } 2987 return true 2988 } 2989 } 2990 if ealg.hash != nil { 2991 ehash := ealg.hash 2992 array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr { 2993 o := seed 2994 for i := 0; i < count; i++ { 2995 o = ehash(arrayAt(ptr, i, esize), o) 2996 } 2997 return o 2998 } 2999 } 3000 3001 switch { 3002 case count == 1 && !ifaceIndir(typ): 3003 // array of 1 direct iface type can be direct 3004 array.kind |= kindDirectIface 3005 default: 3006 array.kind &^= kindDirectIface 3007 } 3008 3009 return cachePut(ckey, &array.rtype) 3010 } 3011 3012 func appendVarint(x []byte, v uintptr) []byte { 3013 for ; v >= 0x80; v >>= 7 { 3014 x = append(x, byte(v|0x80)) 3015 } 3016 x = append(x, byte(v)) 3017 return x 3018 } 3019 3020 // toType converts from a *rtype to a Type that can be returned 3021 // to the client of package reflect. In gc, the only concern is that 3022 // a nil *rtype must be replaced by a nil Type, but in gccgo this 3023 // function takes care of ensuring that multiple *rtype for the same 3024 // type are coalesced into a single Type. 
func toType(t *rtype) Type {
	// A nil *rtype stored in a non-nil Type interface would compare
	// unequal to nil; return a true nil Type instead.
	if t == nil {
		return nil
	}
	return t
}

// layoutKey identifies one cached function-call frame layout.
type layoutKey struct {
	t    *rtype // function signature
	rcvr *rtype // receiver type, or nil if none
}

// layoutType holds the computed frame layout for a layoutKey.
type layoutType struct {
	t         *rtype
	argSize   uintptr // size of arguments
	retOffset uintptr // offset of return values.
	stack     *bitVector
	framePool *sync.Pool
}

// layoutCache memoizes funcLayout results, keyed by (func type, receiver).
var layoutCache struct {
	sync.RWMutex
	m map[layoutKey]layoutType
}

// funcLayout computes a struct type representing the layout of the
// function arguments and return values for the function type t.
// If rcvr != nil, rcvr specifies the type of the receiver.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
	if t.Kind() != Func {
		panic("reflect: funcLayout of non-func type")
	}
	if rcvr != nil && rcvr.Kind() == Interface {
		panic("reflect: funcLayout with interface receiver " + rcvr.String())
	}
	// Fast path: read-locked cache lookup, then re-check under the
	// write lock before computing (double-checked locking).
	k := layoutKey{t, rcvr}
	layoutCache.RLock()
	if x := layoutCache.m[k]; x.t != nil {
		layoutCache.RUnlock()
		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
	}
	layoutCache.RUnlock()
	layoutCache.Lock()
	if x := layoutCache.m[k]; x.t != nil {
		layoutCache.Unlock()
		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
	}

	tt := (*funcType)(unsafe.Pointer(t))

	// compute gc program & stack bitmap for arguments
	ptrmap := new(bitVector)
	var offset uintptr
	if rcvr != nil {
		// Reflect uses the "interface" calling convention for
		// methods, where receivers take one word of argument
		// space no matter how big they actually are.
		if ifaceIndir(rcvr) || rcvr.pointers() {
			ptrmap.append(1)
		}
		offset += ptrSize
	}
	for _, arg := range tt.in() {
		// Align each argument, record its pointer bits, and advance.
		offset += -offset & uintptr(arg.align-1)
		addTypeBits(ptrmap, offset, arg)
		offset += arg.size
	}
	argN := ptrmap.n
	argSize = offset
	if runtime.GOARCH == "amd64p32" {
		offset += -offset & (8 - 1)
	}
	// Results start at the next pointer-aligned offset.
	offset += -offset & (ptrSize - 1)
	retOffset = offset
	for _, res := range tt.out() {
		offset += -offset & uintptr(res.align-1)
		addTypeBits(ptrmap, offset, res)
		offset += res.size
	}
	offset += -offset & (ptrSize - 1)

	// build dummy rtype holding gc program
	x := &rtype{
		align:   ptrSize,
		size:    offset,
		ptrdata: uintptr(ptrmap.n) * ptrSize,
	}
	if runtime.GOARCH == "amd64p32" {
		x.align = 8
	}
	if ptrmap.n > 0 {
		x.gcdata = &ptrmap.data[0]
	} else {
		x.kind |= kindNoPointers
	}
	// Truncate the bitmap's logical length back to the argument words;
	// x.ptrdata above was computed from the full (args+results) count.
	ptrmap.n = argN

	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", "", false))

	// cache result for future callers
	if layoutCache.m == nil {
		layoutCache.m = make(map[layoutKey]layoutType)
	}
	framePool = &sync.Pool{New: func() interface{} {
		return unsafe_New(x)
	}}
	layoutCache.m[k] = layoutType{
		t:         x,
		argSize:   argSize,
		retOffset: retOffset,
		stack:     ptrmap,
		framePool: framePool,
	}
	layoutCache.Unlock()
	return x, argSize, retOffset, ptrmap, framePool
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	// Grow the backing store one byte at a time as bits are added.
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}

// addTypeBits records the pointer words of a value of type t, located at
// the given byte offset, into bv. Words before offset not yet described
// are filled in as scalar (0) bits.
func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
	if t.kind&kindNoPointers != 0 {
		return
	}

	switch Kind(t.kind & kindMask) {
	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.fields {
			f := &tt.fields[i]
			addTypeBits(bv, offset+f.offset(), f.typ)
		}
	}
}