github.com/FenixAra/go@v0.0.0-20170127160404-96ea0918e670/src/reflect/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"runtime"
	"strconv"
	"sync"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of exported methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package.
	// It returns an empty string for unnamed types.
	Name() string

	// PkgPath returns a named type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
	// the package path will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
84 // The string representation may use shortened package names 85 // (e.g., base64 instead of "encoding/base64") and is not 86 // guaranteed to be unique among types. To test for type identity, 87 // compare the Types directly. 88 String() string 89 90 // Kind returns the specific kind of this type. 91 Kind() Kind 92 93 // Implements reports whether the type implements the interface type u. 94 Implements(u Type) bool 95 96 // AssignableTo reports whether a value of the type is assignable to type u. 97 AssignableTo(u Type) bool 98 99 // ConvertibleTo reports whether a value of the type is convertible to type u. 100 ConvertibleTo(u Type) bool 101 102 // Comparable reports whether values of this type are comparable. 103 Comparable() bool 104 105 // Methods applicable only to some types, depending on Kind. 106 // The methods allowed for each kind are: 107 // 108 // Int*, Uint*, Float*, Complex*: Bits 109 // Array: Elem, Len 110 // Chan: ChanDir, Elem 111 // Func: In, NumIn, Out, NumOut, IsVariadic. 112 // Map: Key, Elem 113 // Ptr: Elem 114 // Slice: Elem 115 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 116 117 // Bits returns the size of the type in bits. 118 // It panics if the type's Kind is not one of the 119 // sized or unsized Int, Uint, Float, or Complex kinds. 120 Bits() int 121 122 // ChanDir returns a channel type's direction. 123 // It panics if the type's Kind is not Chan. 124 ChanDir() ChanDir 125 126 // IsVariadic reports whether a function type's final input parameter 127 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 128 // implicit actual type []T. 129 // 130 // For concreteness, if t represents func(x int, y ... float64), then 131 // 132 // t.NumIn() == 2 133 // t.In(0) is the reflect.Type for "int" 134 // t.In(1) is the reflect.Type for "[]float64" 135 // t.IsVariadic() == true 136 // 137 // IsVariadic panics if the type's Kind is not Func. 138 IsVariadic() bool 139 140 // Elem returns a type's element type. 141 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice. 142 Elem() Type 143 144 // Field returns a struct type's i'th field. 145 // It panics if the type's Kind is not Struct. 146 // It panics if i is not in the range [0, NumField()). 147 Field(i int) StructField 148 149 // FieldByIndex returns the nested field corresponding 150 // to the index sequence. It is equivalent to calling Field 151 // successively for each index i. 152 // It panics if the type's Kind is not Struct. 153 FieldByIndex(index []int) StructField 154 155 // FieldByName returns the struct field with the given name 156 // and a boolean indicating if the field was found. 157 FieldByName(name string) (StructField, bool) 158 159 // FieldByNameFunc returns the struct field with a name 160 // that satisfies the match function and a boolean indicating if 161 // the field was found. 162 // 163 // FieldByNameFunc considers the fields in the struct itself 164 // and then the fields in any anonymous structs, in breadth first order, 165 // stopping at the shallowest nesting depth containing one or more 166 // fields satisfying the match function. If multiple fields at that depth 167 // satisfy the match function, they cancel each other 168 // and FieldByNameFunc returns no match. 169 // This behavior mirrors Go's handling of name lookup in 170 // structs containing anonymous fields. 171 FieldByNameFunc(match func(string) bool) (StructField, bool) 172 173 // In returns the type of a function type's i'th input parameter. 
174 // It panics if the type's Kind is not Func. 175 // It panics if i is not in the range [0, NumIn()). 176 In(i int) Type 177 178 // Key returns a map type's key type. 179 // It panics if the type's Kind is not Map. 180 Key() Type 181 182 // Len returns an array type's length. 183 // It panics if the type's Kind is not Array. 184 Len() int 185 186 // NumField returns a struct type's field count. 187 // It panics if the type's Kind is not Struct. 188 NumField() int 189 190 // NumIn returns a function type's input parameter count. 191 // It panics if the type's Kind is not Func. 192 NumIn() int 193 194 // NumOut returns a function type's output parameter count. 195 // It panics if the type's Kind is not Func. 196 NumOut() int 197 198 // Out returns the type of a function type's i'th output parameter. 199 // It panics if the type's Kind is not Func. 200 // It panics if i is not in the range [0, NumOut()). 201 Out(i int) Type 202 203 common() *rtype 204 uncommon() *uncommonType 205 } 206 207 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 208 // if the names are equal, even if they are unexported names originating 209 // in different packages. The practical effect of this is that the result of 210 // t.FieldByName("x") is not well defined if the struct type t contains 211 // multiple fields named x (embedded from different packages). 212 // FieldByName may return one of the fields named x or may report that there are none. 213 // See golang.org/issue/4876 for more details. 214 215 /* 216 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). 217 * A few are known to ../runtime/type.go to convey to debuggers. 218 * They are also known to ../runtime/type.go. 219 */ 220 221 // A Kind represents the specific kind of type that a Type represents. 222 // The zero Kind is not a valid kind. 223 type Kind uint 224 225 const ( 226 Invalid Kind = iota 227 Bool 228 Int 229 Int8 230 Int16 231 Int32 232 Int64 233 Uint 234 Uint8 235 Uint16 236 Uint32 237 Uint64 238 Uintptr 239 Float32 240 Float64 241 Complex64 242 Complex128 243 Array 244 Chan 245 Func 246 Interface 247 Map 248 Ptr 249 Slice 250 String 251 Struct 252 UnsafePointer 253 ) 254 255 // tflag is used by an rtype to signal what extra type information is 256 // available in the memory directly following the rtype value. 257 // 258 // tflag values must be kept in sync with copies in: 259 // cmd/compile/internal/gc/reflect.go 260 // cmd/link/internal/ld/decodesym.go 261 // runtime/type.go 262 type tflag uint8 263 264 const ( 265 // tflagUncommon means that there is a pointer, *uncommonType, 266 // just beyond the outer type structure. 267 // 268 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 269 // then t has uncommonType data and it can be accessed as: 270 // 271 // type tUncommon struct { 272 // structType 273 // u uncommonType 274 // } 275 // u := &(*tUncommon)(unsafe.Pointer(t)).u 276 tflagUncommon tflag = 1 << 0 277 278 // tflagExtraStar means the name in the str field has an 279 // extraneous '*' prefix. This is because for most types T in 280 // a program, the type *T also exists and reusing the str data 281 // saves binary size. 282 tflagExtraStar tflag = 1 << 1 283 284 // tflagNamed means the type has a name. 285 tflagNamed tflag = 1 << 2 286 ) 287 288 // rtype is the common implementation of most values. 
289 // It is embedded in other, public struct types, but always 290 // with a unique tag like `reflect:"array"` or `reflect:"ptr"` 291 // so that code cannot convert from, say, *arrayType to *ptrType. 292 type rtype struct { 293 size uintptr 294 ptrdata uintptr 295 hash uint32 // hash of type; avoids computation in hash tables 296 tflag tflag // extra type information flags 297 align uint8 // alignment of variable with this type 298 fieldAlign uint8 // alignment of struct field with this type 299 kind uint8 // enumeration for C 300 alg *typeAlg // algorithm table 301 gcdata *byte // garbage collection data 302 str nameOff // string form 303 ptrToThis typeOff // type for pointer to this type, may be zero 304 } 305 306 // a copy of runtime.typeAlg 307 type typeAlg struct { 308 // function for hashing objects of this type 309 // (ptr to object, seed) -> hash 310 hash func(unsafe.Pointer, uintptr) uintptr 311 // function for comparing objects of this type 312 // (ptr to object A, ptr to object B) -> ==? 313 equal func(unsafe.Pointer, unsafe.Pointer) bool 314 } 315 316 // Method on non-interface type 317 type method struct { 318 name nameOff // name of method 319 mtyp typeOff // method type (without receiver) 320 ifn textOff // fn used in interface call (one-word receiver) 321 tfn textOff // fn used for normal method call 322 } 323 324 // uncommonType is present only for types with names or methods 325 // (if T is a named type, the uncommonTypes for T and *T have methods). 326 // Using a pointer to this struct reduces the overall size required 327 // to describe an unnamed type with no methods. 328 type uncommonType struct { 329 pkgPath nameOff // import path; empty for built-in types like int, string 330 mcount uint16 // number of methods 331 _ uint16 // unused 332 moff uint32 // offset from this uncommontype to [mcount]method 333 _ uint32 // unused 334 } 335 336 // ChanDir represents a channel type's direction. 337 type ChanDir int 338 339 const ( 340 RecvDir ChanDir = 1 << iota // <-chan 341 SendDir // chan<- 342 BothDir = RecvDir | SendDir // chan 343 ) 344 345 // arrayType represents a fixed array type. 346 type arrayType struct { 347 rtype `reflect:"array"` 348 elem *rtype // array element type 349 slice *rtype // slice type 350 len uintptr 351 } 352 353 // chanType represents a channel type. 354 type chanType struct { 355 rtype `reflect:"chan"` 356 elem *rtype // channel element type 357 dir uintptr // channel direction (ChanDir) 358 } 359 360 // funcType represents a function type. 361 // 362 // A *rtype for each in and out parameter is stored in an array that 363 // directly follows the funcType (and possibly its uncommonType). So 364 // a function type with one method, one input, and one output is: 365 // 366 // struct { 367 // funcType 368 // uncommonType 369 // [2]*rtype // [0] is in, [1] is out 370 // } 371 type funcType struct { 372 rtype `reflect:"func"` 373 inCount uint16 374 outCount uint16 // top bit is set if last input parameter is ... 375 } 376 377 // imethod represents a method on an interface type 378 type imethod struct { 379 name nameOff // name of method 380 typ typeOff // .(*FuncType) underneath 381 } 382 383 // interfaceType represents an interface type. 384 type interfaceType struct { 385 rtype `reflect:"interface"` 386 pkgPath name // import path 387 methods []imethod // sorted by hash 388 } 389 390 // mapType represents a map type. 
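// For map types constructed at run time, MapOf below fills in the sizing and
// indirection fields: keys or elements whose size exceeds maxKeySize or
// maxValSize are stored indirectly (as pointers) in the bucket, with
// indirectkey or indirectvalue set to 1.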
391 type mapType struct { 392 rtype `reflect:"map"` 393 key *rtype // map key type 394 elem *rtype // map element (value) type 395 bucket *rtype // internal bucket structure 396 hmap *rtype // internal map header 397 keysize uint8 // size of key slot 398 indirectkey uint8 // store ptr to key instead of key itself 399 valuesize uint8 // size of value slot 400 indirectvalue uint8 // store ptr to value instead of value itself 401 bucketsize uint16 // size of bucket 402 reflexivekey bool // true if k==k for all keys 403 needkeyupdate bool // true if we need to update key on an overwrite 404 } 405 406 // ptrType represents a pointer type. 407 type ptrType struct { 408 rtype `reflect:"ptr"` 409 elem *rtype // pointer element (pointed at) type 410 } 411 412 // sliceType represents a slice type. 413 type sliceType struct { 414 rtype `reflect:"slice"` 415 elem *rtype // slice element type 416 } 417 418 // Struct field 419 type structField struct { 420 name name // name is empty for embedded fields 421 typ *rtype // type of field 422 offset uintptr // byte offset of field within struct 423 } 424 425 // structType represents a struct type. 426 type structType struct { 427 rtype `reflect:"struct"` 428 pkgPath name 429 fields []structField // sorted by offset 430 } 431 432 // name is an encoded type name with optional extra data. 433 // 434 // The first byte is a bit field containing: 435 // 436 // 1<<0 the name is exported 437 // 1<<1 tag data follows the name 438 // 1<<2 pkgPath nameOff follows the name and tag 439 // 440 // The next two bytes are the data length: 441 // 442 // l := uint16(data[1])<<8 | uint16(data[2]) 443 // 444 // Bytes [3:3+l] are the string data. 445 // 446 // If tag data follows then bytes 3+l and 3+l+1 are the tag length, 447 // with the data following. 448 // 449 // If the import path follows, then 4 bytes at the end of 450 // the data form a nameOff. The import path is only set for concrete 451 // methods that are defined in a different package than their type. 452 // 453 // If a name starts with "*", then the exported bit represents 454 // whether the pointed to type is exported. 
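// For example (an illustrative sketch, not part of the original source), a
// name "Foo" with tag `json:"foo"`, no import path, and the exported bit set
// would be laid out by newName below as:
//
//	[]byte{
//		1<<0 | 1<<1, // exported, tag data follows
//		0, 3,        // name length: 3
//		'F', 'o', 'o',
//		0, 10,       // tag length: 10
//		'j', 's', 'o', 'n', ':', '"', 'f', 'o', 'o', '"',
//	}
//
// The name, nameLen, tag and tagLen methods below read these fields back out.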
455 type name struct { 456 bytes *byte 457 } 458 459 func (n name) data(off int) *byte { 460 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off))) 461 } 462 463 func (n name) isExported() bool { 464 return (*n.bytes)&(1<<0) != 0 465 } 466 467 func (n name) nameLen() int { 468 return int(uint16(*n.data(1))<<8 | uint16(*n.data(2))) 469 } 470 471 func (n name) tagLen() int { 472 if *n.data(0)&(1<<1) == 0 { 473 return 0 474 } 475 off := 3 + n.nameLen() 476 return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1))) 477 } 478 479 func (n name) name() (s string) { 480 if n.bytes == nil { 481 return 482 } 483 b := (*[4]byte)(unsafe.Pointer(n.bytes)) 484 485 hdr := (*stringHeader)(unsafe.Pointer(&s)) 486 hdr.Data = unsafe.Pointer(&b[3]) 487 hdr.Len = int(b[1])<<8 | int(b[2]) 488 return s 489 } 490 491 func (n name) tag() (s string) { 492 tl := n.tagLen() 493 if tl == 0 { 494 return "" 495 } 496 nl := n.nameLen() 497 hdr := (*stringHeader)(unsafe.Pointer(&s)) 498 hdr.Data = unsafe.Pointer(n.data(3 + nl + 2)) 499 hdr.Len = tl 500 return s 501 } 502 503 func (n name) pkgPath() string { 504 if n.bytes == nil || *n.data(0)&(1<<2) == 0 { 505 return "" 506 } 507 off := 3 + n.nameLen() 508 if tl := n.tagLen(); tl > 0 { 509 off += 2 + tl 510 } 511 var nameOff int32 512 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) 513 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 514 return pkgPathName.name() 515 } 516 517 // round n up to a multiple of a. a must be a power of 2. 518 func round(n, a uintptr) uintptr { 519 return (n + a - 1) &^ (a - 1) 520 } 521 522 func newName(n, tag, pkgPath string, exported bool) name { 523 if len(n) > 1<<16-1 { 524 panic("reflect.nameFrom: name too long: " + n) 525 } 526 if len(tag) > 1<<16-1 { 527 panic("reflect.nameFrom: tag too long: " + tag) 528 } 529 530 var bits byte 531 l := 1 + 2 + len(n) 532 if exported { 533 bits |= 1 << 0 534 } 535 if len(tag) > 0 { 536 l += 2 + len(tag) 537 bits |= 1 << 1 538 } 539 if pkgPath != "" { 540 bits |= 1 << 2 541 } 542 543 b := make([]byte, l) 544 b[0] = bits 545 b[1] = uint8(len(n) >> 8) 546 b[2] = uint8(len(n)) 547 copy(b[3:], n) 548 if len(tag) > 0 { 549 tb := b[3+len(n):] 550 tb[0] = uint8(len(tag) >> 8) 551 tb[1] = uint8(len(tag)) 552 copy(tb[2:], tag) 553 } 554 555 if pkgPath != "" { 556 panic("reflect: creating a name with a package path is not supported") 557 } 558 559 return name{bytes: &b[0]} 560 } 561 562 /* 563 * The compiler knows the exact layout of all the data structures above. 564 * The compiler does not know about the data structures and methods below. 565 */ 566 567 // Method represents a single method. 568 type Method struct { 569 // Name is the method name. 570 // PkgPath is the package path that qualifies a lower case (unexported) 571 // method name. It is empty for upper case (exported) method names. 572 // The combination of PkgPath and Name uniquely identifies a method 573 // in a method set. 
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)

func (k Kind) String() string {
	if int(k) < len(kindNames) {
		return kindNames[k]
	}
	return "kind" + strconv.Itoa(int(k))
}

var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

func (t *uncommonType) methods() []method {
	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
}

// resolveNameOff resolves a name offset from a base pointer.
// The (*rtype).nameOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

// resolveTypeOff resolves an *rtype offset from a base type.
// The (*rtype).typeOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// resolveTextOff resolves a function pointer offset from a base type.
// The (*rtype).textOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// addReflectOff adds a pointer to the reflection lookup map in the runtime.
// It returns a new ID that can be used as a typeOff or textOff, and will
// be resolved correctly. Implemented in the runtime package.
func addReflectOff(ptr unsafe.Pointer) int32

// resolveReflectName adds a name to the reflection lookup map in the runtime.
// It returns a new nameOff that can be used to refer to the pointer.
func resolveReflectName(n name) nameOff {
	return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
}

// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
// It returns a new typeOff that can be used to refer to the pointer.
func resolveReflectType(t *rtype) typeOff {
	return typeOff(addReflectOff(unsafe.Pointer(t)))
}

// resolveReflectText adds a function pointer to the reflection lookup map in
// the runtime. It returns a new textOff that can be used to refer to the
// pointer.
666 func resolveReflectText(ptr unsafe.Pointer) textOff { 667 return textOff(addReflectOff(ptr)) 668 } 669 670 type nameOff int32 // offset to a name 671 type typeOff int32 // offset to an *rtype 672 type textOff int32 // offset from top of text section 673 674 func (t *rtype) nameOff(off nameOff) name { 675 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 676 } 677 678 func (t *rtype) typeOff(off typeOff) *rtype { 679 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 680 } 681 682 func (t *rtype) textOff(off textOff) unsafe.Pointer { 683 return resolveTextOff(unsafe.Pointer(t), int32(off)) 684 } 685 686 func (t *rtype) uncommon() *uncommonType { 687 if t.tflag&tflagUncommon == 0 { 688 return nil 689 } 690 switch t.Kind() { 691 case Struct: 692 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 693 case Ptr: 694 type u struct { 695 ptrType 696 u uncommonType 697 } 698 return &(*u)(unsafe.Pointer(t)).u 699 case Func: 700 type u struct { 701 funcType 702 u uncommonType 703 } 704 return &(*u)(unsafe.Pointer(t)).u 705 case Slice: 706 type u struct { 707 sliceType 708 u uncommonType 709 } 710 return &(*u)(unsafe.Pointer(t)).u 711 case Array: 712 type u struct { 713 arrayType 714 u uncommonType 715 } 716 return &(*u)(unsafe.Pointer(t)).u 717 case Chan: 718 type u struct { 719 chanType 720 u uncommonType 721 } 722 return &(*u)(unsafe.Pointer(t)).u 723 case Map: 724 type u struct { 725 mapType 726 u uncommonType 727 } 728 return &(*u)(unsafe.Pointer(t)).u 729 case Interface: 730 type u struct { 731 interfaceType 732 u uncommonType 733 } 734 return &(*u)(unsafe.Pointer(t)).u 735 default: 736 type u struct { 737 rtype 738 u uncommonType 739 } 740 return &(*u)(unsafe.Pointer(t)).u 741 } 742 } 743 744 func (t *rtype) String() string { 745 s := t.nameOff(t.str).name() 746 if t.tflag&tflagExtraStar != 0 { 747 return s[1:] 748 } 749 return s 750 } 751 752 func (t *rtype) Size() uintptr { return t.size } 753 754 func (t *rtype) Bits() int { 755 if t == nil { 756 panic("reflect: Bits of nil Type") 757 } 758 k := t.Kind() 759 if k < Int || k > Complex128 { 760 panic("reflect: Bits of non-arithmetic Type " + t.String()) 761 } 762 return int(t.size) * 8 763 } 764 765 func (t *rtype) Align() int { return int(t.align) } 766 767 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 768 769 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 770 771 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 } 772 773 func (t *rtype) common() *rtype { return t } 774 775 var methodCache struct { 776 sync.RWMutex 777 m map[*rtype][]method 778 } 779 780 func (t *rtype) exportedMethods() []method { 781 methodCache.RLock() 782 methods, found := methodCache.m[t] 783 methodCache.RUnlock() 784 785 if found { 786 return methods 787 } 788 789 ut := t.uncommon() 790 if ut == nil { 791 return nil 792 } 793 allm := ut.methods() 794 allExported := true 795 for _, m := range allm { 796 name := t.nameOff(m.name) 797 if !name.isExported() { 798 allExported = false 799 break 800 } 801 } 802 if allExported { 803 methods = allm 804 } else { 805 methods = make([]method, 0, len(allm)) 806 for _, m := range allm { 807 name := t.nameOff(m.name) 808 if name.isExported() { 809 methods = append(methods, m) 810 } 811 } 812 methods = methods[:len(methods):len(methods)] 813 } 814 815 methodCache.Lock() 816 if methodCache.m == nil { 817 methodCache.m = make(map[*rtype][]method) 818 } 819 methodCache.m[t] = methods 820 methodCache.Unlock() 821 822 return methods 823 } 824 825 func (t 
*rtype) NumMethod() int { 826 if t.Kind() == Interface { 827 tt := (*interfaceType)(unsafe.Pointer(t)) 828 return tt.NumMethod() 829 } 830 if t.tflag&tflagUncommon == 0 { 831 return 0 // avoid methodCache lock in zero case 832 } 833 return len(t.exportedMethods()) 834 } 835 836 func (t *rtype) Method(i int) (m Method) { 837 if t.Kind() == Interface { 838 tt := (*interfaceType)(unsafe.Pointer(t)) 839 return tt.Method(i) 840 } 841 methods := t.exportedMethods() 842 if i < 0 || i >= len(methods) { 843 panic("reflect: Method index out of range") 844 } 845 p := methods[i] 846 pname := t.nameOff(p.name) 847 m.Name = pname.name() 848 fl := flag(Func) 849 mtyp := t.typeOff(p.mtyp) 850 ft := (*funcType)(unsafe.Pointer(mtyp)) 851 in := make([]Type, 0, 1+len(ft.in())) 852 in = append(in, t) 853 for _, arg := range ft.in() { 854 in = append(in, arg) 855 } 856 out := make([]Type, 0, len(ft.out())) 857 for _, ret := range ft.out() { 858 out = append(out, ret) 859 } 860 mt := FuncOf(in, out, ft.IsVariadic()) 861 m.Type = mt 862 tfn := t.textOff(p.tfn) 863 fn := unsafe.Pointer(&tfn) 864 m.Func = Value{mt.(*rtype), fn, fl} 865 866 m.Index = i 867 return m 868 } 869 870 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 871 if t.Kind() == Interface { 872 tt := (*interfaceType)(unsafe.Pointer(t)) 873 return tt.MethodByName(name) 874 } 875 ut := t.uncommon() 876 if ut == nil { 877 return Method{}, false 878 } 879 utmethods := ut.methods() 880 for i := 0; i < int(ut.mcount); i++ { 881 p := utmethods[i] 882 pname := t.nameOff(p.name) 883 if pname.isExported() && pname.name() == name { 884 return t.Method(i), true 885 } 886 } 887 return Method{}, false 888 } 889 890 func (t *rtype) PkgPath() string { 891 if t.tflag&tflagNamed == 0 { 892 return "" 893 } 894 ut := t.uncommon() 895 if ut == nil { 896 return "" 897 } 898 return t.nameOff(ut.pkgPath).name() 899 } 900 901 func hasPrefix(s, prefix string) bool { 902 return len(s) >= len(prefix) && s[:len(prefix)] == prefix 903 } 904 905 func (t *rtype) Name() string { 906 if t.tflag&tflagNamed == 0 { 907 return "" 908 } 909 s := t.String() 910 i := len(s) - 1 911 for i >= 0 { 912 if s[i] == '.' 
{ 913 break 914 } 915 i-- 916 } 917 return s[i+1:] 918 } 919 920 func (t *rtype) ChanDir() ChanDir { 921 if t.Kind() != Chan { 922 panic("reflect: ChanDir of non-chan type") 923 } 924 tt := (*chanType)(unsafe.Pointer(t)) 925 return ChanDir(tt.dir) 926 } 927 928 func (t *rtype) IsVariadic() bool { 929 if t.Kind() != Func { 930 panic("reflect: IsVariadic of non-func type") 931 } 932 tt := (*funcType)(unsafe.Pointer(t)) 933 return tt.outCount&(1<<15) != 0 934 } 935 936 func (t *rtype) Elem() Type { 937 switch t.Kind() { 938 case Array: 939 tt := (*arrayType)(unsafe.Pointer(t)) 940 return toType(tt.elem) 941 case Chan: 942 tt := (*chanType)(unsafe.Pointer(t)) 943 return toType(tt.elem) 944 case Map: 945 tt := (*mapType)(unsafe.Pointer(t)) 946 return toType(tt.elem) 947 case Ptr: 948 tt := (*ptrType)(unsafe.Pointer(t)) 949 return toType(tt.elem) 950 case Slice: 951 tt := (*sliceType)(unsafe.Pointer(t)) 952 return toType(tt.elem) 953 } 954 panic("reflect: Elem of invalid type") 955 } 956 957 func (t *rtype) Field(i int) StructField { 958 if t.Kind() != Struct { 959 panic("reflect: Field of non-struct type") 960 } 961 tt := (*structType)(unsafe.Pointer(t)) 962 return tt.Field(i) 963 } 964 965 func (t *rtype) FieldByIndex(index []int) StructField { 966 if t.Kind() != Struct { 967 panic("reflect: FieldByIndex of non-struct type") 968 } 969 tt := (*structType)(unsafe.Pointer(t)) 970 return tt.FieldByIndex(index) 971 } 972 973 func (t *rtype) FieldByName(name string) (StructField, bool) { 974 if t.Kind() != Struct { 975 panic("reflect: FieldByName of non-struct type") 976 } 977 tt := (*structType)(unsafe.Pointer(t)) 978 return tt.FieldByName(name) 979 } 980 981 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 982 if t.Kind() != Struct { 983 panic("reflect: FieldByNameFunc of non-struct type") 984 } 985 tt := (*structType)(unsafe.Pointer(t)) 986 return tt.FieldByNameFunc(match) 987 } 988 989 func (t *rtype) In(i int) Type { 990 if t.Kind() != Func { 991 panic("reflect: In of non-func type") 992 } 993 tt := (*funcType)(unsafe.Pointer(t)) 994 return toType(tt.in()[i]) 995 } 996 997 func (t *rtype) Key() Type { 998 if t.Kind() != Map { 999 panic("reflect: Key of non-map type") 1000 } 1001 tt := (*mapType)(unsafe.Pointer(t)) 1002 return toType(tt.key) 1003 } 1004 1005 func (t *rtype) Len() int { 1006 if t.Kind() != Array { 1007 panic("reflect: Len of non-array type") 1008 } 1009 tt := (*arrayType)(unsafe.Pointer(t)) 1010 return int(tt.len) 1011 } 1012 1013 func (t *rtype) NumField() int { 1014 if t.Kind() != Struct { 1015 panic("reflect: NumField of non-struct type") 1016 } 1017 tt := (*structType)(unsafe.Pointer(t)) 1018 return len(tt.fields) 1019 } 1020 1021 func (t *rtype) NumIn() int { 1022 if t.Kind() != Func { 1023 panic("reflect: NumIn of non-func type") 1024 } 1025 tt := (*funcType)(unsafe.Pointer(t)) 1026 return int(tt.inCount) 1027 } 1028 1029 func (t *rtype) NumOut() int { 1030 if t.Kind() != Func { 1031 panic("reflect: NumOut of non-func type") 1032 } 1033 tt := (*funcType)(unsafe.Pointer(t)) 1034 return len(tt.out()) 1035 } 1036 1037 func (t *rtype) Out(i int) Type { 1038 if t.Kind() != Func { 1039 panic("reflect: Out of non-func type") 1040 } 1041 tt := (*funcType)(unsafe.Pointer(t)) 1042 return toType(tt.out()[i]) 1043 } 1044 1045 func (t *funcType) in() []*rtype { 1046 uadd := unsafe.Sizeof(*t) 1047 if t.tflag&tflagUncommon != 0 { 1048 uadd += unsafe.Sizeof(uncommonType{}) 1049 } 1050 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount] 1051 } 
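// Example (an illustrative sketch, not part of the original source): from a
// client of this package, the Func-kind accessors above walk a signature at
// run time. Assumes the client imports "fmt" and "reflect".
//
//	f := func(prefix string, vs ...int) (string, error) { return prefix, nil }
//	t := reflect.TypeOf(f)
//	fmt.Println(t.NumIn(), t.NumOut(), t.IsVariadic()) // 2 2 true
//	fmt.Println(t.In(1))                               // []int
//	fmt.Println(t.Out(1))                              // error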

func (t *funcType) out() []*rtype {
	uadd := unsafe.Sizeof(*t)
	if t.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommonType{})
	}
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	pname := t.nameOff(p.name)
	m.Name = pname.name()
	if !pname.isExported() {
		m.PkgPath = pname.pkgPath()
		if m.PkgPath == "" {
			m.PkgPath = t.pkgPath.name()
		}
	}
	m.Type = toType(t.typeOff(p.typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's method set.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if t.nameOff(p.name).name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string.
If the tag does not have the conventional format, 1156 // the value returned by Lookup is unspecified. 1157 func (tag StructTag) Lookup(key string) (value string, ok bool) { 1158 // When modifying this code, also update the validateStructTag code 1159 // in cmd/vet/structtag.go. 1160 1161 for tag != "" { 1162 // Skip leading space. 1163 i := 0 1164 for i < len(tag) && tag[i] == ' ' { 1165 i++ 1166 } 1167 tag = tag[i:] 1168 if tag == "" { 1169 break 1170 } 1171 1172 // Scan to colon. A space, a quote or a control character is a syntax error. 1173 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just 1174 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters 1175 // as it is simpler to inspect the tag's bytes than the tag's runes. 1176 i = 0 1177 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { 1178 i++ 1179 } 1180 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { 1181 break 1182 } 1183 name := string(tag[:i]) 1184 tag = tag[i+1:] 1185 1186 // Scan quoted string to find value. 1187 i = 1 1188 for i < len(tag) && tag[i] != '"' { 1189 if tag[i] == '\\' { 1190 i++ 1191 } 1192 i++ 1193 } 1194 if i >= len(tag) { 1195 break 1196 } 1197 qvalue := string(tag[:i+1]) 1198 tag = tag[i+1:] 1199 1200 if key == name { 1201 value, err := strconv.Unquote(qvalue) 1202 if err != nil { 1203 break 1204 } 1205 return value, true 1206 } 1207 } 1208 return "", false 1209 } 1210 1211 // Field returns the i'th struct field. 1212 func (t *structType) Field(i int) (f StructField) { 1213 if i < 0 || i >= len(t.fields) { 1214 panic("reflect: Field index out of bounds") 1215 } 1216 p := &t.fields[i] 1217 f.Type = toType(p.typ) 1218 if name := p.name.name(); name != "" { 1219 f.Name = name 1220 } else { 1221 t := f.Type 1222 if t.Kind() == Ptr { 1223 t = t.Elem() 1224 } 1225 f.Name = t.Name() 1226 f.Anonymous = true 1227 } 1228 if !p.name.isExported() { 1229 f.PkgPath = p.name.pkgPath() 1230 if f.PkgPath == "" { 1231 f.PkgPath = t.pkgPath.name() 1232 } 1233 } 1234 if tag := p.name.tag(); tag != "" { 1235 f.Tag = StructTag(tag) 1236 } 1237 f.Offset = p.offset 1238 1239 // NOTE(rsc): This is the only allocation in the interface 1240 // presented by a reflect.Type. It would be nice to avoid, 1241 // at least in the common cases, but we need to make sure 1242 // that misbehaving clients of reflect cannot affect other 1243 // uses of reflect. One possibility is CL 5371098, but we 1244 // postponed that ugliness until there is a demonstrated 1245 // need for the performance. This is issue 2320. 1246 f.Index = []int{i} 1247 return 1248 } 1249 1250 // TODO(gri): Should there be an error/bool indicator if the index 1251 // is wrong for FieldByIndex? 1252 1253 // FieldByIndex returns the nested field corresponding to index. 1254 func (t *structType) FieldByIndex(index []int) (f StructField) { 1255 f.Type = toType(&t.rtype) 1256 for i, x := range index { 1257 if i > 0 { 1258 ft := f.Type 1259 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct { 1260 ft = ft.Elem() 1261 } 1262 f.Type = ft 1263 } 1264 f = f.Type.Field(x) 1265 } 1266 return 1267 } 1268 1269 // A fieldScan represents an item on the fieldByNameFunc scan work list. 1270 type fieldScan struct { 1271 typ *structType 1272 index []int 1273 } 1274 1275 // FieldByNameFunc returns the struct field with a name that satisfies the 1276 // match function and a boolean to indicate if the field was found. 
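// For example (an illustrative sketch, not part of the original source), given
//
//	type A struct{ X int }
//	type B struct{ X int }
//	type T struct {
//		A
//		B
//	}
//
// a match on "X" finds two fields at depth 1, so they cancel each other and no
// field is returned, while matches on "A" or "B" succeed at depth 0.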
1277 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) { 1278 // This uses the same condition that the Go language does: there must be a unique instance 1279 // of the match at a given depth level. If there are multiple instances of a match at the 1280 // same depth, they annihilate each other and inhibit any possible match at a lower level. 1281 // The algorithm is breadth first search, one depth level at a time. 1282 1283 // The current and next slices are work queues: 1284 // current lists the fields to visit on this depth level, 1285 // and next lists the fields on the next lower level. 1286 current := []fieldScan{} 1287 next := []fieldScan{{typ: t}} 1288 1289 // nextCount records the number of times an embedded type has been 1290 // encountered and considered for queueing in the 'next' slice. 1291 // We only queue the first one, but we increment the count on each. 1292 // If a struct type T can be reached more than once at a given depth level, 1293 // then it annihilates itself and need not be considered at all when we 1294 // process that next depth level. 1295 var nextCount map[*structType]int 1296 1297 // visited records the structs that have been considered already. 1298 // Embedded pointer fields can create cycles in the graph of 1299 // reachable embedded types; visited avoids following those cycles. 1300 // It also avoids duplicated effort: if we didn't find the field in an 1301 // embedded type T at level 2, we won't find it in one at level 4 either. 1302 visited := map[*structType]bool{} 1303 1304 for len(next) > 0 { 1305 current, next = next, current[:0] 1306 count := nextCount 1307 nextCount = nil 1308 1309 // Process all the fields at this depth, now listed in 'current'. 1310 // The loop queues embedded fields found in 'next', for processing during the next 1311 // iteration. The multiplicity of the 'current' field counts is recorded 1312 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. 1313 for _, scan := range current { 1314 t := scan.typ 1315 if visited[t] { 1316 // We've looked through this type before, at a higher level. 1317 // That higher level would shadow the lower level we're now at, 1318 // so this one can't be useful to us. Ignore it. 1319 continue 1320 } 1321 visited[t] = true 1322 for i := range t.fields { 1323 f := &t.fields[i] 1324 // Find name and type for field f. 1325 var fname string 1326 var ntyp *rtype 1327 if name := f.name.name(); name != "" { 1328 fname = name 1329 } else { 1330 // Anonymous field of type T or *T. 1331 // Name taken from type. 1332 ntyp = f.typ 1333 if ntyp.Kind() == Ptr { 1334 ntyp = ntyp.Elem().common() 1335 } 1336 fname = ntyp.Name() 1337 } 1338 1339 // Does it match? 1340 if match(fname) { 1341 // Potential match 1342 if count[t] > 1 || ok { 1343 // Name appeared multiple times at this level: annihilate. 1344 return StructField{}, false 1345 } 1346 result = t.Field(i) 1347 result.Index = nil 1348 result.Index = append(result.Index, scan.index...) 1349 result.Index = append(result.Index, i) 1350 ok = true 1351 continue 1352 } 1353 1354 // Queue embedded struct fields for processing with next level, 1355 // but only if we haven't seen a match yet at this level and only 1356 // if the embedded types haven't already been queued. 
1357 if ok || ntyp == nil || ntyp.Kind() != Struct { 1358 continue 1359 } 1360 styp := (*structType)(unsafe.Pointer(ntyp)) 1361 if nextCount[styp] > 0 { 1362 nextCount[styp] = 2 // exact multiple doesn't matter 1363 continue 1364 } 1365 if nextCount == nil { 1366 nextCount = map[*structType]int{} 1367 } 1368 nextCount[styp] = 1 1369 if count[t] > 1 { 1370 nextCount[styp] = 2 // exact multiple doesn't matter 1371 } 1372 var index []int 1373 index = append(index, scan.index...) 1374 index = append(index, i) 1375 next = append(next, fieldScan{styp, index}) 1376 } 1377 } 1378 if ok { 1379 break 1380 } 1381 } 1382 return 1383 } 1384 1385 // FieldByName returns the struct field with the given name 1386 // and a boolean to indicate if the field was found. 1387 func (t *structType) FieldByName(name string) (f StructField, present bool) { 1388 // Quick check for top-level name, or struct without anonymous fields. 1389 hasAnon := false 1390 if name != "" { 1391 for i := range t.fields { 1392 tf := &t.fields[i] 1393 tfname := tf.name.name() 1394 if tfname == "" { 1395 hasAnon = true 1396 continue 1397 } 1398 if tfname == name { 1399 return t.Field(i), true 1400 } 1401 } 1402 } 1403 if !hasAnon { 1404 return 1405 } 1406 return t.FieldByNameFunc(func(s string) bool { return s == name }) 1407 } 1408 1409 // TypeOf returns the reflection Type that represents the dynamic type of i. 1410 // If i is a nil interface value, TypeOf returns nil. 1411 func TypeOf(i interface{}) Type { 1412 eface := *(*emptyInterface)(unsafe.Pointer(&i)) 1413 return toType(eface.typ) 1414 } 1415 1416 // ptrMap is the cache for PtrTo. 1417 var ptrMap struct { 1418 sync.RWMutex 1419 m map[*rtype]*ptrType 1420 } 1421 1422 // PtrTo returns the pointer type with element t. 1423 // For example, if t represents type Foo, PtrTo(t) represents *Foo. 1424 func PtrTo(t Type) Type { 1425 return t.(*rtype).ptrTo() 1426 } 1427 1428 func (t *rtype) ptrTo() *rtype { 1429 if t.ptrToThis != 0 { 1430 return t.typeOff(t.ptrToThis) 1431 } 1432 1433 // Check the cache. 1434 ptrMap.RLock() 1435 if m := ptrMap.m; m != nil { 1436 if p := m[t]; p != nil { 1437 ptrMap.RUnlock() 1438 return &p.rtype 1439 } 1440 } 1441 ptrMap.RUnlock() 1442 1443 ptrMap.Lock() 1444 if ptrMap.m == nil { 1445 ptrMap.m = make(map[*rtype]*ptrType) 1446 } 1447 p := ptrMap.m[t] 1448 if p != nil { 1449 // some other goroutine won the race and created it 1450 ptrMap.Unlock() 1451 return &p.rtype 1452 } 1453 1454 // Look in known types. 1455 s := "*" + t.String() 1456 for _, tt := range typesByString(s) { 1457 p = (*ptrType)(unsafe.Pointer(tt)) 1458 if p.elem == t { 1459 ptrMap.m[t] = p 1460 ptrMap.Unlock() 1461 return &p.rtype 1462 } 1463 } 1464 1465 // Create a new ptrType starting with the description 1466 // of an *unsafe.Pointer. 1467 var iptr interface{} = (*unsafe.Pointer)(nil) 1468 prototype := *(**ptrType)(unsafe.Pointer(&iptr)) 1469 pp := *prototype 1470 1471 pp.str = resolveReflectName(newName(s, "", "", false)) 1472 1473 // For the type structures linked into the binary, the 1474 // compiler provides a good hash of the string. 1475 // Create a good hash for the new string by using 1476 // the FNV-1 hash's mixing function to combine the 1477 // old hash and the new "*". 1478 pp.hash = fnv1(t.hash, '*') 1479 1480 pp.elem = t 1481 1482 ptrMap.m[t] = &pp 1483 ptrMap.Unlock() 1484 return &pp.rtype 1485 } 1486 1487 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. 
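// The constant 16777619 is the 32-bit FNV prime; multiplying by the prime
// before the XOR is what makes this FNV-1 rather than FNV-1a, which XORs
// first. For example, ptrTo above derives the hash of *T from T's hash as
// fnv1(t.hash, '*').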
1488 func fnv1(x uint32, list ...byte) uint32 { 1489 for _, b := range list { 1490 x = x*16777619 ^ uint32(b) 1491 } 1492 return x 1493 } 1494 1495 func (t *rtype) Implements(u Type) bool { 1496 if u == nil { 1497 panic("reflect: nil type passed to Type.Implements") 1498 } 1499 if u.Kind() != Interface { 1500 panic("reflect: non-interface type passed to Type.Implements") 1501 } 1502 return implements(u.(*rtype), t) 1503 } 1504 1505 func (t *rtype) AssignableTo(u Type) bool { 1506 if u == nil { 1507 panic("reflect: nil type passed to Type.AssignableTo") 1508 } 1509 uu := u.(*rtype) 1510 return directlyAssignable(uu, t) || implements(uu, t) 1511 } 1512 1513 func (t *rtype) ConvertibleTo(u Type) bool { 1514 if u == nil { 1515 panic("reflect: nil type passed to Type.ConvertibleTo") 1516 } 1517 uu := u.(*rtype) 1518 return convertOp(uu, t) != nil 1519 } 1520 1521 func (t *rtype) Comparable() bool { 1522 return t.alg != nil && t.alg.equal != nil 1523 } 1524 1525 // implements reports whether the type V implements the interface type T. 1526 func implements(T, V *rtype) bool { 1527 if T.Kind() != Interface { 1528 return false 1529 } 1530 t := (*interfaceType)(unsafe.Pointer(T)) 1531 if len(t.methods) == 0 { 1532 return true 1533 } 1534 1535 // The same algorithm applies in both cases, but the 1536 // method tables for an interface type and a concrete type 1537 // are different, so the code is duplicated. 1538 // In both cases the algorithm is a linear scan over the two 1539 // lists - T's methods and V's methods - simultaneously. 1540 // Since method tables are stored in a unique sorted order 1541 // (alphabetical, with no duplicate method names), the scan 1542 // through V's methods must hit a match for each of T's 1543 // methods along the way, or else V does not implement T. 1544 // This lets us run the scan in overall linear time instead of 1545 // the quadratic time a naive search would require. 1546 // See also ../runtime/iface.go. 1547 if V.Kind() == Interface { 1548 v := (*interfaceType)(unsafe.Pointer(V)) 1549 i := 0 1550 for j := 0; j < len(v.methods); j++ { 1551 tm := &t.methods[i] 1552 vm := &v.methods[j] 1553 if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { 1554 if i++; i >= len(t.methods) { 1555 return true 1556 } 1557 } 1558 } 1559 return false 1560 } 1561 1562 v := V.uncommon() 1563 if v == nil { 1564 return false 1565 } 1566 i := 0 1567 vmethods := v.methods() 1568 for j := 0; j < int(v.mcount); j++ { 1569 tm := &t.methods[i] 1570 vm := vmethods[j] 1571 if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) { 1572 if i++; i >= len(t.methods) { 1573 return true 1574 } 1575 } 1576 } 1577 return false 1578 } 1579 1580 // directlyAssignable reports whether a value x of type V can be directly 1581 // assigned (using memmove) to a value of type T. 1582 // https://golang.org/doc/go_spec.html#Assignability 1583 // Ignoring the interface rules (implemented elsewhere) 1584 // and the ideal constant rules (no ideal constants at run time). 1585 func directlyAssignable(T, V *rtype) bool { 1586 // x's type V is identical to T? 1587 if T == V { 1588 return true 1589 } 1590 1591 // Otherwise at least one of T and V must be unnamed 1592 // and they must have the same kind. 1593 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() { 1594 return false 1595 } 1596 1597 // x's type T and V must have identical underlying types. 
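	//
	// For example (an illustrative note, not part of the original source), with
	//
	//	type A []byte
	//	type B []byte
	//
	// a value of type A is directly assignable to []byte (one side is unnamed
	// and the underlying types are identical) but not to B (both sides are
	// named), even though all three share the underlying type []byte.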
1598 return haveIdenticalUnderlyingType(T, V, true) 1599 } 1600 1601 func haveIdenticalType(T, V Type, cmpTags bool) bool { 1602 if cmpTags { 1603 return T == V 1604 } 1605 1606 if T.Name() != V.Name() || T.Kind() != V.Kind() { 1607 return false 1608 } 1609 1610 return haveIdenticalUnderlyingType(T.common(), V.common(), false) 1611 } 1612 1613 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool { 1614 if T == V { 1615 return true 1616 } 1617 1618 kind := T.Kind() 1619 if kind != V.Kind() { 1620 return false 1621 } 1622 1623 // Non-composite types of equal kind have same underlying type 1624 // (the predefined instance of the type). 1625 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer { 1626 return true 1627 } 1628 1629 // Composite types. 1630 switch kind { 1631 case Array: 1632 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1633 1634 case Chan: 1635 // Special case: 1636 // x is a bidirectional channel value, T is a channel type, 1637 // and x's type V and T have identical element types. 1638 if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) { 1639 return true 1640 } 1641 1642 // Otherwise continue test for identical underlying type. 1643 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1644 1645 case Func: 1646 t := (*funcType)(unsafe.Pointer(T)) 1647 v := (*funcType)(unsafe.Pointer(V)) 1648 if t.outCount != v.outCount || t.inCount != v.inCount { 1649 return false 1650 } 1651 for i := 0; i < t.NumIn(); i++ { 1652 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) { 1653 return false 1654 } 1655 } 1656 for i := 0; i < t.NumOut(); i++ { 1657 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) { 1658 return false 1659 } 1660 } 1661 return true 1662 1663 case Interface: 1664 t := (*interfaceType)(unsafe.Pointer(T)) 1665 v := (*interfaceType)(unsafe.Pointer(V)) 1666 if len(t.methods) == 0 && len(v.methods) == 0 { 1667 return true 1668 } 1669 // Might have the same methods but still 1670 // need a run time conversion. 1671 return false 1672 1673 case Map: 1674 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1675 1676 case Ptr, Slice: 1677 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags) 1678 1679 case Struct: 1680 t := (*structType)(unsafe.Pointer(T)) 1681 v := (*structType)(unsafe.Pointer(V)) 1682 if len(t.fields) != len(v.fields) { 1683 return false 1684 } 1685 for i := range t.fields { 1686 tf := &t.fields[i] 1687 vf := &v.fields[i] 1688 if tf.name.name() != vf.name.name() { 1689 return false 1690 } 1691 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) { 1692 return false 1693 } 1694 if cmpTags && tf.name.tag() != vf.name.tag() { 1695 return false 1696 } 1697 if tf.offset != vf.offset { 1698 return false 1699 } 1700 if !tf.name.isExported() { 1701 tp := tf.name.pkgPath() 1702 if tp == "" { 1703 tp = t.pkgPath.name() 1704 } 1705 vp := vf.name.pkgPath() 1706 if vp == "" { 1707 vp = v.pkgPath.name() 1708 } 1709 if tp != vp { 1710 return false 1711 } 1712 } 1713 } 1714 return true 1715 } 1716 1717 return false 1718 } 1719 1720 // typelinks is implemented in package runtime. 1721 // It returns a slice of the sections in each module, 1722 // and a slice of *rtype offsets in each module. 1723 // 1724 // The types in each module are sorted by string. 
That is, the first 1725 // two linked types of the first module are: 1726 // 1727 // d0 := sections[0] 1728 // t1 := (*rtype)(add(d0, offset[0][0])) 1729 // t2 := (*rtype)(add(d0, offset[0][1])) 1730 // 1731 // and 1732 // 1733 // t1.String() < t2.String() 1734 // 1735 // Note that strings are not unique identifiers for types: 1736 // there can be more than one with a given string. 1737 // Only types we might want to look up are included: 1738 // pointers, channels, maps, slices, and arrays. 1739 func typelinks() (sections []unsafe.Pointer, offset [][]int32) 1740 1741 func rtypeOff(section unsafe.Pointer, off int32) *rtype { 1742 return (*rtype)(add(section, uintptr(off))) 1743 } 1744 1745 // typesByString returns the subslice of typelinks() whose elements have 1746 // the given string representation. 1747 // It may be empty (no known types with that string) or may have 1748 // multiple elements (multiple types with that string). 1749 func typesByString(s string) []*rtype { 1750 sections, offset := typelinks() 1751 var ret []*rtype 1752 1753 for offsI, offs := range offset { 1754 section := sections[offsI] 1755 1756 // We are looking for the first index i where the string becomes >= s. 1757 // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). 1758 i, j := 0, len(offs) 1759 for i < j { 1760 h := i + (j-i)/2 // avoid overflow when computing h 1761 // i ≤ h < j 1762 if !(rtypeOff(section, offs[h]).String() >= s) { 1763 i = h + 1 // preserves f(i-1) == false 1764 } else { 1765 j = h // preserves f(j) == true 1766 } 1767 } 1768 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 1769 1770 // Having found the first, linear scan forward to find the last. 1771 // We could do a second binary search, but the caller is going 1772 // to do a linear scan anyway. 1773 for j := i; j < len(offs); j++ { 1774 typ := rtypeOff(section, offs[j]) 1775 if typ.String() != s { 1776 break 1777 } 1778 ret = append(ret, typ) 1779 } 1780 } 1781 return ret 1782 } 1783 1784 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. 1785 var lookupCache struct { 1786 sync.RWMutex 1787 m map[cacheKey]*rtype 1788 } 1789 1790 // A cacheKey is the key for use in the lookupCache. 1791 // Four values describe any of the types we are looking for: 1792 // type kind, one or two subtypes, and an extra integer. 1793 type cacheKey struct { 1794 kind Kind 1795 t1 *rtype 1796 t2 *rtype 1797 extra uintptr 1798 } 1799 1800 // cacheGet looks for a type under the key k in the lookupCache. 1801 // If it finds one, it returns that type. 1802 // If not, it returns nil with the cache locked. 1803 // The caller is expected to use cachePut to unlock the cache. 1804 func cacheGet(k cacheKey) Type { 1805 lookupCache.RLock() 1806 t := lookupCache.m[k] 1807 lookupCache.RUnlock() 1808 if t != nil { 1809 return t 1810 } 1811 1812 lookupCache.Lock() 1813 t = lookupCache.m[k] 1814 if t != nil { 1815 lookupCache.Unlock() 1816 return t 1817 } 1818 1819 if lookupCache.m == nil { 1820 lookupCache.m = make(map[cacheKey]*rtype) 1821 } 1822 1823 return nil 1824 } 1825 1826 // cachePut stores the given type in the cache, unlocks the cache, 1827 // and returns the type. It is expected that the cache is locked 1828 // because cacheGet returned nil. 1829 func cachePut(k cacheKey, t *rtype) Type { 1830 lookupCache.m[k] = t 1831 lookupCache.Unlock() 1832 return t 1833 } 1834 1835 // The funcLookupCache caches FuncOf lookups. 
1836 // FuncOf does not share the common lookupCache since cacheKey is not 1837 // sufficient to represent functions unambiguously. 1838 var funcLookupCache struct { 1839 sync.RWMutex 1840 m map[uint32][]*rtype // keyed by hash calculated in FuncOf 1841 } 1842 1843 // ChanOf returns the channel type with the given direction and element type. 1844 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. 1845 // 1846 // The gc runtime imposes a limit of 64 kB on channel element types. 1847 // If t's size is equal to or exceeds this limit, ChanOf panics. 1848 func ChanOf(dir ChanDir, t Type) Type { 1849 typ := t.(*rtype) 1850 1851 // Look in cache. 1852 ckey := cacheKey{Chan, typ, nil, uintptr(dir)} 1853 if ch := cacheGet(ckey); ch != nil { 1854 return ch 1855 } 1856 1857 // This restriction is imposed by the gc compiler and the runtime. 1858 if typ.size >= 1<<16 { 1859 lookupCache.Unlock() 1860 panic("reflect.ChanOf: element size too large") 1861 } 1862 1863 // Look in known types. 1864 // TODO: Precedence when constructing string. 1865 var s string 1866 switch dir { 1867 default: 1868 lookupCache.Unlock() 1869 panic("reflect.ChanOf: invalid dir") 1870 case SendDir: 1871 s = "chan<- " + typ.String() 1872 case RecvDir: 1873 s = "<-chan " + typ.String() 1874 case BothDir: 1875 s = "chan " + typ.String() 1876 } 1877 for _, tt := range typesByString(s) { 1878 ch := (*chanType)(unsafe.Pointer(tt)) 1879 if ch.elem == typ && ch.dir == uintptr(dir) { 1880 return cachePut(ckey, tt) 1881 } 1882 } 1883 1884 // Make a channel type. 1885 var ichan interface{} = (chan unsafe.Pointer)(nil) 1886 prototype := *(**chanType)(unsafe.Pointer(&ichan)) 1887 ch := *prototype 1888 ch.tflag = 0 1889 ch.dir = uintptr(dir) 1890 ch.str = resolveReflectName(newName(s, "", "", false)) 1891 ch.hash = fnv1(typ.hash, 'c', byte(dir)) 1892 ch.elem = typ 1893 1894 return cachePut(ckey, &ch.rtype) 1895 } 1896 1897 func ismapkey(*rtype) bool // implemented in runtime 1898 1899 // MapOf returns the map type with the given key and element types. 1900 // For example, if k represents int and e represents string, 1901 // MapOf(k, e) represents map[int]string. 1902 // 1903 // If the key type is not a valid map key type (that is, if it does 1904 // not implement Go's == operator), MapOf panics. 1905 func MapOf(key, elem Type) Type { 1906 ktyp := key.(*rtype) 1907 etyp := elem.(*rtype) 1908 1909 if !ismapkey(ktyp) { 1910 panic("reflect.MapOf: invalid key type " + ktyp.String()) 1911 } 1912 1913 // Look in cache. 1914 ckey := cacheKey{Map, ktyp, etyp, 0} 1915 if mt := cacheGet(ckey); mt != nil { 1916 return mt 1917 } 1918 1919 // Look in known types. 1920 s := "map[" + ktyp.String() + "]" + etyp.String() 1921 for _, tt := range typesByString(s) { 1922 mt := (*mapType)(unsafe.Pointer(tt)) 1923 if mt.key == ktyp && mt.elem == etyp { 1924 return cachePut(ckey, tt) 1925 } 1926 } 1927 1928 // Make a map type. 
1929 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) 1930 mt := **(**mapType)(unsafe.Pointer(&imap)) 1931 mt.str = resolveReflectName(newName(s, "", "", false)) 1932 mt.tflag = 0 1933 mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash)) 1934 mt.key = ktyp 1935 mt.elem = etyp 1936 mt.bucket = bucketOf(ktyp, etyp) 1937 if ktyp.size > maxKeySize { 1938 mt.keysize = uint8(ptrSize) 1939 mt.indirectkey = 1 1940 } else { 1941 mt.keysize = uint8(ktyp.size) 1942 mt.indirectkey = 0 1943 } 1944 if etyp.size > maxValSize { 1945 mt.valuesize = uint8(ptrSize) 1946 mt.indirectvalue = 1 1947 } else { 1948 mt.valuesize = uint8(etyp.size) 1949 mt.indirectvalue = 0 1950 } 1951 mt.bucketsize = uint16(mt.bucket.size) 1952 mt.reflexivekey = isReflexive(ktyp) 1953 mt.needkeyupdate = needKeyUpdate(ktyp) 1954 mt.ptrToThis = 0 1955 1956 return cachePut(ckey, &mt.rtype) 1957 } 1958 1959 type funcTypeFixed4 struct { 1960 funcType 1961 args [4]*rtype 1962 } 1963 type funcTypeFixed8 struct { 1964 funcType 1965 args [8]*rtype 1966 } 1967 type funcTypeFixed16 struct { 1968 funcType 1969 args [16]*rtype 1970 } 1971 type funcTypeFixed32 struct { 1972 funcType 1973 args [32]*rtype 1974 } 1975 type funcTypeFixed64 struct { 1976 funcType 1977 args [64]*rtype 1978 } 1979 type funcTypeFixed128 struct { 1980 funcType 1981 args [128]*rtype 1982 } 1983 1984 // FuncOf returns the function type with the given argument and result types. 1985 // For example if k represents int and e represents string, 1986 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. 1987 // 1988 // The variadic argument controls whether the function is variadic. FuncOf 1989 // panics if the in[len(in)-1] does not represent a slice and variadic is 1990 // true. 1991 func FuncOf(in, out []Type, variadic bool) Type { 1992 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { 1993 panic("reflect.FuncOf: last arg of variadic func must be slice") 1994 } 1995 1996 // Make a func type. 1997 var ifunc interface{} = (func())(nil) 1998 prototype := *(**funcType)(unsafe.Pointer(&ifunc)) 1999 n := len(in) + len(out) 2000 2001 var ft *funcType 2002 var args []*rtype 2003 switch { 2004 case n <= 4: 2005 fixed := new(funcTypeFixed4) 2006 args = fixed.args[:0:len(fixed.args)] 2007 ft = &fixed.funcType 2008 case n <= 8: 2009 fixed := new(funcTypeFixed8) 2010 args = fixed.args[:0:len(fixed.args)] 2011 ft = &fixed.funcType 2012 case n <= 16: 2013 fixed := new(funcTypeFixed16) 2014 args = fixed.args[:0:len(fixed.args)] 2015 ft = &fixed.funcType 2016 case n <= 32: 2017 fixed := new(funcTypeFixed32) 2018 args = fixed.args[:0:len(fixed.args)] 2019 ft = &fixed.funcType 2020 case n <= 64: 2021 fixed := new(funcTypeFixed64) 2022 args = fixed.args[:0:len(fixed.args)] 2023 ft = &fixed.funcType 2024 case n <= 128: 2025 fixed := new(funcTypeFixed128) 2026 args = fixed.args[:0:len(fixed.args)] 2027 ft = &fixed.funcType 2028 default: 2029 panic("reflect.FuncOf: too many arguments") 2030 } 2031 *ft = *prototype 2032 2033 // Build a hash and minimally populate ft. 
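// MapOf above builds map[K]E at run time after checking ismapkey. A minimal
// usage sketch pairing it with MakeMap on the client side:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)) // map[string]int
//		m := reflect.MakeMap(mt)
//		m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
//		fmt.Println(mt, m.MapIndex(reflect.ValueOf("answer")).Int()) // map[string]int 42
//
//		// A key type without ==, such as a slice type, makes MapOf panic:
//		// reflect.MapOf(reflect.TypeOf([]int(nil)), reflect.TypeOf(0))
//	}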
2034 var hash uint32 2035 for _, in := range in { 2036 t := in.(*rtype) 2037 args = append(args, t) 2038 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2039 } 2040 if variadic { 2041 hash = fnv1(hash, 'v') 2042 } 2043 hash = fnv1(hash, '.') 2044 for _, out := range out { 2045 t := out.(*rtype) 2046 args = append(args, t) 2047 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 2048 } 2049 if len(args) > 50 { 2050 panic("reflect.FuncOf does not support more than 50 arguments") 2051 } 2052 ft.tflag = 0 2053 ft.hash = hash 2054 ft.inCount = uint16(len(in)) 2055 ft.outCount = uint16(len(out)) 2056 if variadic { 2057 ft.outCount |= 1 << 15 2058 } 2059 2060 // Look in cache. 2061 funcLookupCache.RLock() 2062 for _, t := range funcLookupCache.m[hash] { 2063 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2064 funcLookupCache.RUnlock() 2065 return t 2066 } 2067 } 2068 funcLookupCache.RUnlock() 2069 2070 // Not in cache, lock and retry. 2071 funcLookupCache.Lock() 2072 defer funcLookupCache.Unlock() 2073 if funcLookupCache.m == nil { 2074 funcLookupCache.m = make(map[uint32][]*rtype) 2075 } 2076 for _, t := range funcLookupCache.m[hash] { 2077 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2078 return t 2079 } 2080 } 2081 2082 // Look in known types for the same string representation. 2083 str := funcStr(ft) 2084 for _, tt := range typesByString(str) { 2085 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) { 2086 funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt) 2087 return tt 2088 } 2089 } 2090 2091 // Populate the remaining fields of ft and store in cache. 2092 ft.str = resolveReflectName(newName(str, "", "", false)) 2093 ft.ptrToThis = 0 2094 funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype) 2095 2096 return &ft.rtype 2097 } 2098 2099 // funcStr builds a string representation of a funcType. 2100 func funcStr(ft *funcType) string { 2101 repr := make([]byte, 0, 64) 2102 repr = append(repr, "func("...) 2103 for i, t := range ft.in() { 2104 if i > 0 { 2105 repr = append(repr, ", "...) 2106 } 2107 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2108 repr = append(repr, "..."...) 2109 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2110 } else { 2111 repr = append(repr, t.String()...) 2112 } 2113 } 2114 repr = append(repr, ')') 2115 out := ft.out() 2116 if len(out) == 1 { 2117 repr = append(repr, ' ') 2118 } else if len(out) > 1 { 2119 repr = append(repr, " ("...) 2120 } 2121 for i, t := range out { 2122 if i > 0 { 2123 repr = append(repr, ", "...) 2124 } 2125 repr = append(repr, t.String()...) 2126 } 2127 if len(out) > 1 { 2128 repr = append(repr, ')') 2129 } 2130 return string(repr) 2131 } 2132 2133 // isReflexive reports whether the == operation on the type is reflexive. 2134 // That is, x == x for all values x of type t. 
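// FuncOf above (together with funcStr) constructs a function type and its
// string form. A minimal usage sketch combining it with MakeFunc and Call:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		ft := reflect.FuncOf(
//			[]reflect.Type{reflect.TypeOf(0)},  // in: (int)
//			[]reflect.Type{reflect.TypeOf("")}, // out: string
//			false,                              // not variadic
//		)
//		fn := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
//			return []reflect.Value{reflect.ValueOf(fmt.Sprintf("got %d", args[0].Int()))}
//		})
//		out := fn.Call([]reflect.Value{reflect.ValueOf(7)})
//		fmt.Println(ft, out[0].String()) // func(int) string got 7
//	}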
2135 func isReflexive(t *rtype) bool { 2136 switch t.Kind() { 2137 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2138 return true 2139 case Float32, Float64, Complex64, Complex128, Interface: 2140 return false 2141 case Array: 2142 tt := (*arrayType)(unsafe.Pointer(t)) 2143 return isReflexive(tt.elem) 2144 case Struct: 2145 tt := (*structType)(unsafe.Pointer(t)) 2146 for _, f := range tt.fields { 2147 if !isReflexive(f.typ) { 2148 return false 2149 } 2150 } 2151 return true 2152 default: 2153 // Func, Map, Slice, Invalid 2154 panic("isReflexive called on non-key type " + t.String()) 2155 } 2156 } 2157 2158 // needKeyUpdate reports whether map overwrites require the key to be copied. 2159 func needKeyUpdate(t *rtype) bool { 2160 switch t.Kind() { 2161 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer: 2162 return false 2163 case Float32, Float64, Complex64, Complex128, Interface, String: 2164 // Float keys can be updated from +0 to -0. 2165 // String keys can be updated to use a smaller backing store. 2166 // Interfaces might have floats of strings in them. 2167 return true 2168 case Array: 2169 tt := (*arrayType)(unsafe.Pointer(t)) 2170 return needKeyUpdate(tt.elem) 2171 case Struct: 2172 tt := (*structType)(unsafe.Pointer(t)) 2173 for _, f := range tt.fields { 2174 if needKeyUpdate(f.typ) { 2175 return true 2176 } 2177 } 2178 return false 2179 default: 2180 // Func, Map, Slice, Invalid 2181 panic("needKeyUpdate called on non-key type " + t.String()) 2182 } 2183 } 2184 2185 // Make sure these routines stay in sync with ../../runtime/hashmap.go! 2186 // These types exist only for GC, so we only fill out GC relevant info. 2187 // Currently, that's just size and the GC program. We also fill in string 2188 // for possible debugging use. 2189 const ( 2190 bucketSize uintptr = 8 2191 maxKeySize uintptr = 128 2192 maxValSize uintptr = 128 2193 ) 2194 2195 func bucketOf(ktyp, etyp *rtype) *rtype { 2196 // See comment on hmap.overflow in ../runtime/hashmap.go. 2197 var kind uint8 2198 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 && 2199 ktyp.size <= maxKeySize && etyp.size <= maxValSize { 2200 kind = kindNoPointers 2201 } 2202 2203 if ktyp.size > maxKeySize { 2204 ktyp = PtrTo(ktyp).(*rtype) 2205 } 2206 if etyp.size > maxValSize { 2207 etyp = PtrTo(etyp).(*rtype) 2208 } 2209 2210 // Prepare GC data if any. 2211 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes, 2212 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap. 2213 // Normally the enforced limit on pointer maps is 16 bytes, 2214 // but larger ones are acceptable, 33 bytes isn't too too big, 2215 // and it's easier to generate a pointer bitmap than a GC program. 2216 // Note that since the key and value are known to be <= 128 bytes, 2217 // they're guaranteed to have bitmaps instead of GC programs. 2218 var gcdata *byte 2219 var ptrdata uintptr 2220 var overflowPad uintptr 2221 2222 // On NaCl, pad if needed to make overflow end at the proper struct alignment. 2223 // On other systems, align > ptrSize is not possible. 
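// isReflexive and needKeyUpdate above capture two map-key subtleties: NaN is
// the one value for which x == x is false, and float/string keys may need to
// be rewritten on insert (+0 vs -0, smaller backing store). The non-reflexive
// case is visible with an ordinary map:
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		m := map[float64]string{}
//		nan := math.NaN()
//		m[nan] = "lost"     // NaN != NaN, so this entry can never be looked up again
//		m[nan] = "lost too" // ...and every insert with a NaN key adds a new entry
//		fmt.Println(len(m), m[nan] == "") // 2 true
//	}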
2224 if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) { 2225 overflowPad = ptrSize 2226 } 2227 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize 2228 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 { 2229 panic("reflect: bad size computation in MapOf") 2230 } 2231 2232 if kind != kindNoPointers { 2233 nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize 2234 mask := make([]byte, (nptr+7)/8) 2235 base := bucketSize / ptrSize 2236 2237 if ktyp.kind&kindNoPointers == 0 { 2238 if ktyp.kind&kindGCProg != 0 { 2239 panic("reflect: unexpected GC program in MapOf") 2240 } 2241 kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata)) 2242 for i := uintptr(0); i < ktyp.size/ptrSize; i++ { 2243 if (kmask[i/8]>>(i%8))&1 != 0 { 2244 for j := uintptr(0); j < bucketSize; j++ { 2245 word := base + j*ktyp.size/ptrSize + i 2246 mask[word/8] |= 1 << (word % 8) 2247 } 2248 } 2249 } 2250 } 2251 base += bucketSize * ktyp.size / ptrSize 2252 2253 if etyp.kind&kindNoPointers == 0 { 2254 if etyp.kind&kindGCProg != 0 { 2255 panic("reflect: unexpected GC program in MapOf") 2256 } 2257 emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata)) 2258 for i := uintptr(0); i < etyp.size/ptrSize; i++ { 2259 if (emask[i/8]>>(i%8))&1 != 0 { 2260 for j := uintptr(0); j < bucketSize; j++ { 2261 word := base + j*etyp.size/ptrSize + i 2262 mask[word/8] |= 1 << (word % 8) 2263 } 2264 } 2265 } 2266 } 2267 base += bucketSize * etyp.size / ptrSize 2268 base += overflowPad / ptrSize 2269 2270 word := base 2271 mask[word/8] |= 1 << (word % 8) 2272 gcdata = &mask[0] 2273 ptrdata = (word + 1) * ptrSize 2274 2275 // overflow word must be last 2276 if ptrdata != size { 2277 panic("reflect: bad layout computation in MapOf") 2278 } 2279 } 2280 2281 b := &rtype{ 2282 align: ptrSize, 2283 size: size, 2284 kind: kind, 2285 ptrdata: ptrdata, 2286 gcdata: gcdata, 2287 } 2288 if overflowPad > 0 { 2289 b.align = 8 2290 } 2291 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")" 2292 b.str = resolveReflectName(newName(s, "", "", false)) 2293 return b 2294 } 2295 2296 // SliceOf returns the slice type with element type t. 2297 // For example, if t represents int, SliceOf(t) represents []int. 2298 func SliceOf(t Type) Type { 2299 typ := t.(*rtype) 2300 2301 // Look in cache. 2302 ckey := cacheKey{Slice, typ, nil, 0} 2303 if slice := cacheGet(ckey); slice != nil { 2304 return slice 2305 } 2306 2307 // Look in known types. 2308 s := "[]" + typ.String() 2309 for _, tt := range typesByString(s) { 2310 slice := (*sliceType)(unsafe.Pointer(tt)) 2311 if slice.elem == typ { 2312 return cachePut(ckey, tt) 2313 } 2314 } 2315 2316 // Make a slice type. 2317 var islice interface{} = ([]unsafe.Pointer)(nil) 2318 prototype := *(**sliceType)(unsafe.Pointer(&islice)) 2319 slice := *prototype 2320 slice.tflag = 0 2321 slice.str = resolveReflectName(newName(s, "", "", false)) 2322 slice.hash = fnv1(typ.hash, '[') 2323 slice.elem = typ 2324 slice.ptrToThis = 0 2325 2326 return cachePut(ckey, &slice.rtype) 2327 } 2328 2329 // The structLookupCache caches StructOf lookups. 2330 // StructOf does not share the common lookupCache since we need to pin 2331 // the memory associated with *structTypeFixedN. 
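// SliceOf above is the simplest of the composite-type constructors. A minimal
// usage sketch pairing it with MakeSlice and Append:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		st := reflect.SliceOf(reflect.TypeOf(0)) // []int
//		s := reflect.MakeSlice(st, 0, 4)
//		s = reflect.Append(s, reflect.ValueOf(1), reflect.ValueOf(2))
//		fmt.Println(st, s.Len(), s.Index(1).Int()) // []int 2 2
//	}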
2332 var structLookupCache struct { 2333 sync.RWMutex 2334 m map[uint32][]interface { 2335 common() *rtype 2336 } // keyed by hash calculated in StructOf 2337 } 2338 2339 type structTypeUncommon struct { 2340 structType 2341 u uncommonType 2342 } 2343 2344 // A *rtype representing a struct is followed directly in memory by an 2345 // array of method objects representing the methods attached to the 2346 // struct. To get the same layout for a run time generated type, we 2347 // need an array directly following the uncommonType memory. The types 2348 // structTypeFixed4, ...structTypeFixedN are used to do this. 2349 // 2350 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2351 2352 // TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs 2353 // have no methods, they could be defined at runtime using the StructOf 2354 // function. 2355 2356 type structTypeFixed4 struct { 2357 structType 2358 u uncommonType 2359 m [4]method 2360 } 2361 2362 type structTypeFixed8 struct { 2363 structType 2364 u uncommonType 2365 m [8]method 2366 } 2367 2368 type structTypeFixed16 struct { 2369 structType 2370 u uncommonType 2371 m [16]method 2372 } 2373 2374 type structTypeFixed32 struct { 2375 structType 2376 u uncommonType 2377 m [32]method 2378 } 2379 2380 // StructOf returns the struct type containing fields. 2381 // The Offset and Index fields are ignored and computed as they would be 2382 // by the compiler. 2383 // 2384 // StructOf currently does not generate wrapper methods for embedded fields. 2385 // This limitation may be lifted in a future version. 2386 func StructOf(fields []StructField) Type { 2387 var ( 2388 hash = fnv1(0, []byte("struct {")...) 2389 size uintptr 2390 typalign uint8 2391 comparable = true 2392 hashable = true 2393 methods []method 2394 2395 fs = make([]structField, len(fields)) 2396 repr = make([]byte, 0, 64) 2397 fset = map[string]struct{}{} // fields' names 2398 2399 hasPtr = false // records whether at least one struct-field is a pointer 2400 hasGCProg = false // records whether a struct-field type has a GCProg 2401 ) 2402 2403 lastzero := uintptr(0) 2404 repr = append(repr, "struct {"...) 2405 for i, field := range fields { 2406 if field.Type == nil { 2407 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2408 } 2409 f := runtimeStructField(field) 2410 ft := f.typ 2411 if ft.kind&kindGCProg != 0 { 2412 hasGCProg = true 2413 } 2414 if ft.pointers() { 2415 hasPtr = true 2416 } 2417 2418 name := "" 2419 // Update string and hash 2420 if f.name.nameLen() > 0 { 2421 hash = fnv1(hash, []byte(f.name.name())...) 2422 repr = append(repr, (" " + f.name.name())...) 2423 name = f.name.name() 2424 } else { 2425 // Embedded field 2426 if f.typ.Kind() == Ptr { 2427 // Embedded ** and *interface{} are illegal 2428 elem := ft.Elem() 2429 if k := elem.Kind(); k == Ptr || k == Interface { 2430 panic("reflect.StructOf: illegal anonymous field type " + ft.String()) 2431 } 2432 name = elem.String() 2433 } else { 2434 name = ft.String() 2435 } 2436 // TODO(sbinet) check for syntactically impossible type names? 
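// StructOf is driven by the field loop above. A minimal usage sketch building
// a plain struct type (no embedded fields) and reading its layout and tags
// back through the public API:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		st := reflect.StructOf([]reflect.StructField{
//			{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
//			{Name: "Age", Type: reflect.TypeOf(0), Tag: `json:"age"`},
//		})
//		v := reflect.New(st).Elem()
//		v.Field(0).SetString("gopher")
//		v.Field(1).SetInt(11)
//		f, _ := st.FieldByName("Age")
//		fmt.Println(st.NumField(), f.Tag.Get("json"), v.Interface()) // 2 age {gopher 11}
//	}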
2437 2438 switch f.typ.Kind() { 2439 case Interface: 2440 ift := (*interfaceType)(unsafe.Pointer(ft)) 2441 for im, m := range ift.methods { 2442 if ift.nameOff(m.name).pkgPath() != "" { 2443 // TODO(sbinet) 2444 panic("reflect: embedded interface with unexported method(s) not implemented") 2445 } 2446 2447 var ( 2448 mtyp = ift.typeOff(m.typ) 2449 ifield = i 2450 imethod = im 2451 ifn Value 2452 tfn Value 2453 ) 2454 2455 if ft.kind&kindDirectIface != 0 { 2456 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2457 var args []Value 2458 var recv = in[0] 2459 if len(in) > 1 { 2460 args = in[1:] 2461 } 2462 return recv.Field(ifield).Method(imethod).Call(args) 2463 }) 2464 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2465 var args []Value 2466 var recv = in[0] 2467 if len(in) > 1 { 2468 args = in[1:] 2469 } 2470 return recv.Field(ifield).Method(imethod).Call(args) 2471 }) 2472 } else { 2473 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2474 var args []Value 2475 var recv = in[0] 2476 if len(in) > 1 { 2477 args = in[1:] 2478 } 2479 return recv.Field(ifield).Method(imethod).Call(args) 2480 }) 2481 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2482 var args []Value 2483 var recv = Indirect(in[0]) 2484 if len(in) > 1 { 2485 args = in[1:] 2486 } 2487 return recv.Field(ifield).Method(imethod).Call(args) 2488 }) 2489 } 2490 2491 methods = append(methods, method{ 2492 name: resolveReflectName(ift.nameOff(m.name)), 2493 mtyp: resolveReflectType(mtyp), 2494 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2495 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2496 }) 2497 } 2498 case Ptr: 2499 ptr := (*ptrType)(unsafe.Pointer(ft)) 2500 if unt := ptr.uncommon(); unt != nil { 2501 for _, m := range unt.methods() { 2502 mname := ptr.nameOff(m.name) 2503 if mname.pkgPath() != "" { 2504 // TODO(sbinet) 2505 panic("reflect: embedded interface with unexported method(s) not implemented") 2506 } 2507 methods = append(methods, method{ 2508 name: resolveReflectName(mname), 2509 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2510 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2511 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2512 }) 2513 } 2514 } 2515 if unt := ptr.elem.uncommon(); unt != nil { 2516 for _, m := range unt.methods() { 2517 mname := ptr.nameOff(m.name) 2518 if mname.pkgPath() != "" { 2519 // TODO(sbinet) 2520 panic("reflect: embedded interface with unexported method(s) not implemented") 2521 } 2522 methods = append(methods, method{ 2523 name: resolveReflectName(mname), 2524 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2525 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2526 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2527 }) 2528 } 2529 } 2530 default: 2531 if unt := ft.uncommon(); unt != nil { 2532 for _, m := range unt.methods() { 2533 mname := ft.nameOff(m.name) 2534 if mname.pkgPath() != "" { 2535 // TODO(sbinet) 2536 panic("reflect: embedded interface with unexported method(s) not implemented") 2537 } 2538 methods = append(methods, method{ 2539 name: resolveReflectName(mname), 2540 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2541 ifn: resolveReflectText(ft.textOff(m.ifn)), 2542 tfn: resolveReflectText(ft.textOff(m.tfn)), 2543 }) 2544 2545 } 2546 } 2547 } 2548 } 2549 if _, dup := fset[name]; dup { 2550 panic("reflect.StructOf: duplicate field " + name) 2551 } 2552 fset[name] = struct{}{} 2553 2554 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2555 2556 repr = append(repr, (" " + ft.String())...) 
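// The ifn/tfn wrappers built above forward a promoted method call to the
// field via Field(ifield).Method(imethod).Call(args). The same forwarding
// shape can be written with the exported API; Greeter and Wrapper below are
// illustrative types, not part of this package:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	type Greeter struct{ Name string }
//
//	func (g Greeter) Hello() string { return "hello, " + g.Name }
//
//	type Wrapper struct{ G Greeter }
//
//	func main() {
//		w := reflect.ValueOf(Wrapper{G: Greeter{Name: "gopher"}})
//		m := w.Field(0).Method(0) // Greeter's only method: Hello
//		fwd := reflect.MakeFunc(m.Type(), func(args []reflect.Value) []reflect.Value {
//			return m.Call(args) // forward to the field's method
//		})
//		fmt.Println(fwd.Call(nil)[0].String()) // hello, gopher
//	}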
2557 if f.name.tagLen() > 0 { 2558 hash = fnv1(hash, []byte(f.name.tag())...) 2559 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2560 } 2561 if i < len(fields)-1 { 2562 repr = append(repr, ';') 2563 } 2564 2565 comparable = comparable && (ft.alg.equal != nil) 2566 hashable = hashable && (ft.alg.hash != nil) 2567 2568 f.offset = align(size, uintptr(ft.align)) 2569 if ft.align > typalign { 2570 typalign = ft.align 2571 } 2572 size = f.offset + ft.size 2573 2574 if ft.size == 0 { 2575 lastzero = size 2576 } 2577 2578 fs[i] = f 2579 } 2580 2581 if size > 0 && lastzero == size { 2582 // This is a non-zero sized struct that ends in a 2583 // zero-sized field. We add an extra byte of padding, 2584 // to ensure that taking the address of the final 2585 // zero-sized field can't manufacture a pointer to the 2586 // next object in the heap. See issue 9401. 2587 size++ 2588 } 2589 2590 var typ *structType 2591 var ut *uncommonType 2592 var typPin interface { 2593 common() *rtype 2594 } // structTypeFixedN 2595 2596 switch { 2597 case len(methods) == 0: 2598 t := new(structTypeUncommon) 2599 typ = &t.structType 2600 ut = &t.u 2601 typPin = t 2602 case len(methods) <= 4: 2603 t := new(structTypeFixed4) 2604 typ = &t.structType 2605 ut = &t.u 2606 copy(t.m[:], methods) 2607 typPin = t 2608 case len(methods) <= 8: 2609 t := new(structTypeFixed8) 2610 typ = &t.structType 2611 ut = &t.u 2612 copy(t.m[:], methods) 2613 typPin = t 2614 case len(methods) <= 16: 2615 t := new(structTypeFixed16) 2616 typ = &t.structType 2617 ut = &t.u 2618 copy(t.m[:], methods) 2619 typPin = t 2620 case len(methods) <= 32: 2621 t := new(structTypeFixed32) 2622 typ = &t.structType 2623 ut = &t.u 2624 copy(t.m[:], methods) 2625 typPin = t 2626 default: 2627 panic("reflect.StructOf: too many methods") 2628 } 2629 ut.mcount = uint16(len(methods)) 2630 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2631 2632 if len(fs) > 0 { 2633 repr = append(repr, ' ') 2634 } 2635 repr = append(repr, '}') 2636 hash = fnv1(hash, '}') 2637 str := string(repr) 2638 2639 // Round the size up to be a multiple of the alignment. 2640 size = align(size, uintptr(typalign)) 2641 2642 // Make the struct type. 2643 var istruct interface{} = struct{}{} 2644 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2645 *typ = *prototype 2646 typ.fields = fs 2647 2648 // Look in cache 2649 structLookupCache.RLock() 2650 for _, st := range structLookupCache.m[hash] { 2651 t := st.common() 2652 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2653 structLookupCache.RUnlock() 2654 return t 2655 } 2656 } 2657 structLookupCache.RUnlock() 2658 2659 // not in cache, lock and retry 2660 structLookupCache.Lock() 2661 defer structLookupCache.Unlock() 2662 if structLookupCache.m == nil { 2663 structLookupCache.m = make(map[uint32][]interface { 2664 common() *rtype 2665 }) 2666 } 2667 for _, st := range structLookupCache.m[hash] { 2668 t := st.common() 2669 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2670 return t 2671 } 2672 } 2673 2674 // Look in known types. 2675 for _, t := range typesByString(str) { 2676 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2677 // even if 't' wasn't a structType with methods, we should be ok 2678 // as the 'u uncommonType' field won't be accessed except when 2679 // tflag&tflagUncommon is set. 
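// The offset/size bookkeeping above mirrors the compiler's layout rules: each
// field starts at an offset rounded up to its type's alignment, the struct
// size is rounded up to the struct alignment, and a non-empty struct ending
// in a zero-sized field gets one extra byte of padding (issue 9401). The
// resulting layout can be inspected from the outside:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	type S struct {
//		A int8
//		B int64    // aligned to int64's alignment, leaving padding after A
//		C struct{} // zero-sized final field
//	}
//
//	func main() {
//		t := reflect.TypeOf(S{})
//		for i := 0; i < t.NumField(); i++ {
//			f := t.Field(i)
//			fmt.Println(f.Name, "offset", f.Offset, "size", f.Type.Size())
//		}
//		fmt.Println("total", t.Size(), "align", t.Align())
//	}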
2680 			structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
2681 			return t
2682 		}
2683 	}
2684 
2685 	typ.str = resolveReflectName(newName(str, "", "", false))
2686 	typ.tflag = 0
2687 	typ.hash = hash
2688 	typ.size = size
2689 	typ.align = typalign
2690 	typ.fieldAlign = typalign
2691 	typ.ptrToThis = 0
2692 	if len(methods) > 0 {
2693 		typ.tflag |= tflagUncommon
2694 	}
2695 	if !hasPtr {
2696 		typ.kind |= kindNoPointers
2697 	} else {
2698 		typ.kind &^= kindNoPointers
2699 	}
2700 
2701 	if hasGCProg {
2702 		lastPtrField := 0
2703 		for i, ft := range fs {
2704 			if ft.typ.pointers() {
2705 				lastPtrField = i
2706 			}
2707 		}
2708 		prog := []byte{0, 0, 0, 0} // will be length of prog
2709 		for i, ft := range fs {
2710 			if i > lastPtrField {
2711 				// gcprog should not include anything for any field after
2712 				// the last field that contains pointer data
2713 				break
2714 			}
2715 			// FIXME(sbinet) handle padding, fields smaller than a word
2716 			elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2717 			elemPtrs := ft.typ.ptrdata / ptrSize
2718 			switch {
2719 			case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2720 				// Element is small with pointer mask; use as literal bits.
2721 				mask := elemGC
2722 				// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2723 				var n uintptr
2724 				for n = elemPtrs; n > 120; n -= 120 {
2725 					prog = append(prog, 120)
2726 					prog = append(prog, mask[:15]...)
2727 					mask = mask[15:]
2728 				}
2729 				prog = append(prog, byte(n))
2730 				prog = append(prog, mask[:(n+7)/8]...)
2731 			case ft.typ.kind&kindGCProg != 0:
2732 				// Element has GC program; emit one element.
2733 				elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2734 				prog = append(prog, elemProg...)
2735 			}
2736 			// Pad from ptrdata to size.
2737 			elemWords := ft.typ.size / ptrSize
2738 			if elemPtrs < elemWords {
2739 				// Emit literal 0 bit, then repeat as needed.
2740 prog = append(prog, 0x01, 0x00) 2741 if elemPtrs+1 < elemWords { 2742 prog = append(prog, 0x81) 2743 prog = appendVarint(prog, elemWords-elemPtrs-1) 2744 } 2745 } 2746 } 2747 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2748 typ.kind |= kindGCProg 2749 typ.gcdata = &prog[0] 2750 } else { 2751 typ.kind &^= kindGCProg 2752 bv := new(bitVector) 2753 addTypeBits(bv, 0, typ.common()) 2754 if len(bv.data) > 0 { 2755 typ.gcdata = &bv.data[0] 2756 } 2757 } 2758 typ.ptrdata = typeptrdata(typ.common()) 2759 typ.alg = new(typeAlg) 2760 if hashable { 2761 typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr { 2762 o := seed 2763 for _, ft := range typ.fields { 2764 pi := unsafe.Pointer(uintptr(p) + ft.offset) 2765 o = ft.typ.alg.hash(pi, o) 2766 } 2767 return o 2768 } 2769 } 2770 2771 if comparable { 2772 typ.alg.equal = func(p, q unsafe.Pointer) bool { 2773 for _, ft := range typ.fields { 2774 pi := unsafe.Pointer(uintptr(p) + ft.offset) 2775 qi := unsafe.Pointer(uintptr(q) + ft.offset) 2776 if !ft.typ.alg.equal(pi, qi) { 2777 return false 2778 } 2779 } 2780 return true 2781 } 2782 } 2783 2784 switch { 2785 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2786 // structs of 1 direct iface type can be direct 2787 typ.kind |= kindDirectIface 2788 default: 2789 typ.kind &^= kindDirectIface 2790 } 2791 2792 structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin) 2793 return &typ.rtype 2794 } 2795 2796 func runtimeStructField(field StructField) structField { 2797 exported := field.PkgPath == "" 2798 if field.Name == "" { 2799 t := field.Type.(*rtype) 2800 if t.Kind() == Ptr { 2801 t = t.Elem().(*rtype) 2802 } 2803 exported = t.nameOff(t.str).isExported() 2804 } else if exported { 2805 b0 := field.Name[0] 2806 if ('a' <= b0 && b0 <= 'z') || b0 == '_' { 2807 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath") 2808 } 2809 } 2810 2811 _ = resolveReflectType(field.Type.common()) 2812 return structField{ 2813 name: newName(field.Name, string(field.Tag), field.PkgPath, exported), 2814 typ: field.Type.common(), 2815 offset: 0, 2816 } 2817 } 2818 2819 // typeptrdata returns the length in bytes of the prefix of t 2820 // containing pointer data. Anything after this offset is scalar data. 2821 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2822 func typeptrdata(t *rtype) uintptr { 2823 if !t.pointers() { 2824 return 0 2825 } 2826 switch t.Kind() { 2827 case Struct: 2828 st := (*structType)(unsafe.Pointer(t)) 2829 // find the last field that has pointers. 2830 field := 0 2831 for i := range st.fields { 2832 ft := st.fields[i].typ 2833 if ft.pointers() { 2834 field = i 2835 } 2836 } 2837 f := st.fields[field] 2838 return f.offset + f.typ.ptrdata 2839 2840 default: 2841 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2842 } 2843 } 2844 2845 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2846 const maxPtrmaskBytes = 2048 2847 2848 // ArrayOf returns the array type with the given count and element type. 2849 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2850 // 2851 // If the resulting type would be larger than the available address space, 2852 // ArrayOf panics. 2853 func ArrayOf(count int, elem Type) Type { 2854 typ := elem.(*rtype) 2855 // call SliceOf here as it calls cacheGet/cachePut. 2856 // ArrayOf also calls cacheGet/cachePut and thus may modify the state of 2857 // the lookupCache mutex. 2858 slice := SliceOf(elem) 2859 2860 // Look in cache. 
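// ArrayOf, whose body begins above, is used like the other constructors. A
// minimal usage sketch pairing it with New and per-index assignment:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		at := reflect.ArrayOf(5, reflect.TypeOf(byte(0))) // [5]uint8
//		a := reflect.New(at).Elem()
//		for i := 0; i < a.Len(); i++ {
//			a.Index(i).SetUint(uint64(i * i))
//		}
//		fmt.Println(at, a.Interface()) // [5]uint8 [0 1 4 9 16]
//	}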
2861 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2862 if array := cacheGet(ckey); array != nil { 2863 return array 2864 } 2865 2866 // Look in known types. 2867 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2868 for _, tt := range typesByString(s) { 2869 array := (*arrayType)(unsafe.Pointer(tt)) 2870 if array.elem == typ { 2871 return cachePut(ckey, tt) 2872 } 2873 } 2874 2875 // Make an array type. 2876 var iarray interface{} = [1]unsafe.Pointer{} 2877 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2878 array := *prototype 2879 array.str = resolveReflectName(newName(s, "", "", false)) 2880 array.hash = fnv1(typ.hash, '[') 2881 for n := uint32(count); n > 0; n >>= 8 { 2882 array.hash = fnv1(array.hash, byte(n)) 2883 } 2884 array.hash = fnv1(array.hash, ']') 2885 array.elem = typ 2886 array.ptrToThis = 0 2887 max := ^uintptr(0) / typ.size 2888 if uintptr(count) > max { 2889 panic("reflect.ArrayOf: array size would exceed virtual address space") 2890 } 2891 array.size = typ.size * uintptr(count) 2892 if count > 0 && typ.ptrdata != 0 { 2893 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2894 } 2895 array.align = typ.align 2896 array.fieldAlign = typ.fieldAlign 2897 array.len = uintptr(count) 2898 array.slice = slice.(*rtype) 2899 2900 array.kind &^= kindNoPointers 2901 switch { 2902 case typ.kind&kindNoPointers != 0 || array.size == 0: 2903 // No pointers. 2904 array.kind |= kindNoPointers 2905 array.gcdata = nil 2906 array.ptrdata = 0 2907 2908 case count == 1: 2909 // In memory, 1-element array looks just like the element. 2910 array.kind |= typ.kind & kindGCProg 2911 array.gcdata = typ.gcdata 2912 array.ptrdata = typ.ptrdata 2913 2914 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2915 // Element is small with pointer mask; array is still small. 2916 // Create direct pointer mask by turning each 1 bit in elem 2917 // into count 1 bits in larger mask. 2918 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2919 elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2920 elemWords := typ.size / ptrSize 2921 for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ { 2922 if (elemMask[j/8]>>(j%8))&1 != 0 { 2923 for i := uintptr(0); i < array.len; i++ { 2924 k := i*elemWords + j 2925 mask[k/8] |= 1 << (k % 8) 2926 } 2927 } 2928 } 2929 array.gcdata = &mask[0] 2930 2931 default: 2932 // Create program that emits one element 2933 // and then repeats to make the array. 2934 prog := []byte{0, 0, 0, 0} // will be length of prog 2935 elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2936 elemPtrs := typ.ptrdata / ptrSize 2937 if typ.kind&kindGCProg == 0 { 2938 // Element is small with pointer mask; use as literal bits. 2939 mask := elemGC 2940 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2941 var n uintptr 2942 for n = elemPtrs; n > 120; n -= 120 { 2943 prog = append(prog, 120) 2944 prog = append(prog, mask[:15]...) 2945 mask = mask[15:] 2946 } 2947 prog = append(prog, byte(n)) 2948 prog = append(prog, mask[:(n+7)/8]...) 2949 } else { 2950 // Element has GC program; emit one element. 2951 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2952 prog = append(prog, elemProg...) 2953 } 2954 // Pad from ptrdata to size. 2955 elemWords := typ.size / ptrSize 2956 if elemPtrs < elemWords { 2957 // Emit literal 0 bit, then repeat as needed. 
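// The pointer-mask case earlier in this switch replicates the element's
// bitmap once per array slot, offsetting each copy by the element size in
// words. repeatMask below is a hypothetical stand-alone rendering of that
// loop (not a helper in this package), kept here only to make the bit
// arithmetic easier to follow:
//
//	package main
//
//	import "fmt"
//
//	// repeatMask copies an element pointer bitmap count times, one element-sized
//	// stride (elemWords words) apart. elemPtrWords is how many words of the
//	// element are covered by the bitmap.
//	func repeatMask(elemMask []byte, elemPtrWords, elemWords, count uintptr) []byte {
//		mask := make([]byte, (elemWords*count+7)/8)
//		for j := uintptr(0); j < elemPtrWords; j++ {
//			if (elemMask[j/8]>>(j%8))&1 != 0 {
//				for i := uintptr(0); i < count; i++ {
//					k := i*elemWords + j
//					mask[k/8] |= 1 << (k % 8)
//				}
//			}
//		}
//		return mask
//	}
//
//	func main() {
//		// Element layout: pointer word, scalar word -> bitmap 1,0.
//		fmt.Printf("%08b\n", repeatMask([]byte{0x01}, 1, 2, 3))
//		// [00010101]: bits 0, 2 and 4 set, i.e. pointers at words 0, 2 and 4.
//	}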
2958 prog = append(prog, 0x01, 0x00) 2959 if elemPtrs+1 < elemWords { 2960 prog = append(prog, 0x81) 2961 prog = appendVarint(prog, elemWords-elemPtrs-1) 2962 } 2963 } 2964 // Repeat count-1 times. 2965 if elemWords < 0x80 { 2966 prog = append(prog, byte(elemWords|0x80)) 2967 } else { 2968 prog = append(prog, 0x80) 2969 prog = appendVarint(prog, elemWords) 2970 } 2971 prog = appendVarint(prog, uintptr(count)-1) 2972 prog = append(prog, 0) 2973 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2974 array.kind |= kindGCProg 2975 array.gcdata = &prog[0] 2976 array.ptrdata = array.size // overestimate but ok; must match program 2977 } 2978 2979 etyp := typ.common() 2980 esize := etyp.Size() 2981 ealg := etyp.alg 2982 2983 array.alg = new(typeAlg) 2984 if ealg.equal != nil { 2985 eequal := ealg.equal 2986 array.alg.equal = func(p, q unsafe.Pointer) bool { 2987 for i := 0; i < count; i++ { 2988 pi := arrayAt(p, i, esize) 2989 qi := arrayAt(q, i, esize) 2990 if !eequal(pi, qi) { 2991 return false 2992 } 2993 2994 } 2995 return true 2996 } 2997 } 2998 if ealg.hash != nil { 2999 ehash := ealg.hash 3000 array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr { 3001 o := seed 3002 for i := 0; i < count; i++ { 3003 o = ehash(arrayAt(ptr, i, esize), o) 3004 } 3005 return o 3006 } 3007 } 3008 3009 switch { 3010 case count == 1 && !ifaceIndir(typ): 3011 // array of 1 direct iface type can be direct 3012 array.kind |= kindDirectIface 3013 default: 3014 array.kind &^= kindDirectIface 3015 } 3016 3017 return cachePut(ckey, &array.rtype) 3018 } 3019 3020 func appendVarint(x []byte, v uintptr) []byte { 3021 for ; v >= 0x80; v >>= 7 { 3022 x = append(x, byte(v|0x80)) 3023 } 3024 x = append(x, byte(v)) 3025 return x 3026 } 3027 3028 // toType converts from a *rtype to a Type that can be returned 3029 // to the client of package reflect. In gc, the only concern is that 3030 // a nil *rtype must be replaced by a nil Type, but in gccgo this 3031 // function takes care of ensuring that multiple *rtype for the same 3032 // type are coalesced into a single Type. 3033 func toType(t *rtype) Type { 3034 if t == nil { 3035 return nil 3036 } 3037 return t 3038 } 3039 3040 type layoutKey struct { 3041 t *rtype // function signature 3042 rcvr *rtype // receiver type, or nil if none 3043 } 3044 3045 type layoutType struct { 3046 t *rtype 3047 argSize uintptr // size of arguments 3048 retOffset uintptr // offset of return values. 3049 stack *bitVector 3050 framePool *sync.Pool 3051 } 3052 3053 var layoutCache struct { 3054 sync.RWMutex 3055 m map[layoutKey]layoutType 3056 } 3057 3058 // funcLayout computes a struct type representing the layout of the 3059 // function arguments and return values for the function type t. 3060 // If rcvr != nil, rcvr specifies the type of the receiver. 3061 // The returned type exists only for GC, so we only fill out GC relevant info. 3062 // Currently, that's just size and the GC program. We also fill in 3063 // the name for possible debugging use. 
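// funcLayout below computes the argument/result frame that reflect builds
// whenever a function is invoked through the package, for example via
// Value.Call:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		add := func(a, b int) (int, error) { return a + b, nil }
//		out := reflect.ValueOf(add).Call([]reflect.Value{
//			reflect.ValueOf(2), reflect.ValueOf(3),
//		})
//		fmt.Println(out[0].Int(), out[1].IsNil()) // 5 true
//	}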
3064 func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { 3065 if t.Kind() != Func { 3066 panic("reflect: funcLayout of non-func type") 3067 } 3068 if rcvr != nil && rcvr.Kind() == Interface { 3069 panic("reflect: funcLayout with interface receiver " + rcvr.String()) 3070 } 3071 k := layoutKey{t, rcvr} 3072 layoutCache.RLock() 3073 if x := layoutCache.m[k]; x.t != nil { 3074 layoutCache.RUnlock() 3075 return x.t, x.argSize, x.retOffset, x.stack, x.framePool 3076 } 3077 layoutCache.RUnlock() 3078 layoutCache.Lock() 3079 if x := layoutCache.m[k]; x.t != nil { 3080 layoutCache.Unlock() 3081 return x.t, x.argSize, x.retOffset, x.stack, x.framePool 3082 } 3083 3084 tt := (*funcType)(unsafe.Pointer(t)) 3085 3086 // compute gc program & stack bitmap for arguments 3087 ptrmap := new(bitVector) 3088 var offset uintptr 3089 if rcvr != nil { 3090 // Reflect uses the "interface" calling convention for 3091 // methods, where receivers take one word of argument 3092 // space no matter how big they actually are. 3093 if ifaceIndir(rcvr) || rcvr.pointers() { 3094 ptrmap.append(1) 3095 } 3096 offset += ptrSize 3097 } 3098 for _, arg := range tt.in() { 3099 offset += -offset & uintptr(arg.align-1) 3100 addTypeBits(ptrmap, offset, arg) 3101 offset += arg.size 3102 } 3103 argN := ptrmap.n 3104 argSize = offset 3105 if runtime.GOARCH == "amd64p32" { 3106 offset += -offset & (8 - 1) 3107 } 3108 offset += -offset & (ptrSize - 1) 3109 retOffset = offset 3110 for _, res := range tt.out() { 3111 offset += -offset & uintptr(res.align-1) 3112 addTypeBits(ptrmap, offset, res) 3113 offset += res.size 3114 } 3115 offset += -offset & (ptrSize - 1) 3116 3117 // build dummy rtype holding gc program 3118 x := &rtype{ 3119 align: ptrSize, 3120 size: offset, 3121 ptrdata: uintptr(ptrmap.n) * ptrSize, 3122 } 3123 if runtime.GOARCH == "amd64p32" { 3124 x.align = 8 3125 } 3126 if ptrmap.n > 0 { 3127 x.gcdata = &ptrmap.data[0] 3128 } else { 3129 x.kind |= kindNoPointers 3130 } 3131 ptrmap.n = argN 3132 3133 var s string 3134 if rcvr != nil { 3135 s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")" 3136 } else { 3137 s = "funcargs(" + t.String() + ")" 3138 } 3139 x.str = resolveReflectName(newName(s, "", "", false)) 3140 3141 // cache result for future callers 3142 if layoutCache.m == nil { 3143 layoutCache.m = make(map[layoutKey]layoutType) 3144 } 3145 framePool = &sync.Pool{New: func() interface{} { 3146 return unsafe_New(x) 3147 }} 3148 layoutCache.m[k] = layoutType{ 3149 t: x, 3150 argSize: argSize, 3151 retOffset: retOffset, 3152 stack: ptrmap, 3153 framePool: framePool, 3154 } 3155 layoutCache.Unlock() 3156 return x, argSize, retOffset, ptrmap, framePool 3157 } 3158 3159 // ifaceIndir reports whether t is stored indirectly in an interface value. 3160 func ifaceIndir(t *rtype) bool { 3161 return t.kind&kindDirectIface == 0 3162 } 3163 3164 // Layout matches runtime.BitVector (well enough). 3165 type bitVector struct { 3166 n uint32 // number of bits 3167 data []byte 3168 } 3169 3170 // append a bit to the bitmap. 
3171 func (bv *bitVector) append(bit uint8) { 3172 if bv.n%8 == 0 { 3173 bv.data = append(bv.data, 0) 3174 } 3175 bv.data[bv.n/8] |= bit << (bv.n % 8) 3176 bv.n++ 3177 } 3178 3179 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3180 if t.kind&kindNoPointers != 0 { 3181 return 3182 } 3183 3184 switch Kind(t.kind & kindMask) { 3185 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: 3186 // 1 pointer at start of representation 3187 for bv.n < uint32(offset/uintptr(ptrSize)) { 3188 bv.append(0) 3189 } 3190 bv.append(1) 3191 3192 case Interface: 3193 // 2 pointers 3194 for bv.n < uint32(offset/uintptr(ptrSize)) { 3195 bv.append(0) 3196 } 3197 bv.append(1) 3198 bv.append(1) 3199 3200 case Array: 3201 // repeat inner type 3202 tt := (*arrayType)(unsafe.Pointer(t)) 3203 for i := 0; i < int(tt.len); i++ { 3204 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3205 } 3206 3207 case Struct: 3208 // apply fields 3209 tt := (*structType)(unsafe.Pointer(t)) 3210 for i := range tt.fields { 3211 f := &tt.fields[i] 3212 addTypeBits(bv, offset+f.offset, f.typ) 3213 } 3214 } 3215 }
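// addTypeBits above records one bit per pointer-sized word: one for kinds
// whose representation starts with a single pointer, two for interfaces, and
// a recursive walk for arrays and structs. pointerWords below is a
// hypothetical re-rendering of the same walk on top of the exported API (it
// is not part of this package):
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//		"unsafe"
//	)
//
//	const ptrSize = unsafe.Sizeof(uintptr(0))
//
//	func pointerWords(bits []bool, offset uintptr, t reflect.Type) {
//		switch t.Kind() {
//		case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr,
//			reflect.Slice, reflect.String, reflect.UnsafePointer:
//			bits[offset/ptrSize] = true // one pointer word at the start
//		case reflect.Interface:
//			bits[offset/ptrSize] = true // two pointer words
//			bits[offset/ptrSize+1] = true
//		case reflect.Array:
//			for i := 0; i < t.Len(); i++ {
//				pointerWords(bits, offset+uintptr(i)*t.Elem().Size(), t.Elem())
//			}
//		case reflect.Struct:
//			for i := 0; i < t.NumField(); i++ {
//				f := t.Field(i)
//				pointerWords(bits, offset+f.Offset, f.Type)
//			}
//		}
//	}
//
//	func main() {
//		type T struct {
//			N int
//			P *int
//			S []byte
//		}
//		t := reflect.TypeOf(T{})
//		bits := make([]bool, t.Size()/ptrSize)
//		pointerWords(bits, 0, t)
//		fmt.Println(bits) // on 64-bit: [false true true false false] (N, P, then S's data/len/cap)
//	}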