github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/reflect/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"github.com/x04/go/src/strconv"
	"github.com/x04/go/src/sync"
	"github.com/x04/go/src/unicode"
	"github.com/x04/go/src/unicode/utf8"
	"github.com/x04/go/src/unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Only exported methods are accessible and they are sorted in
	// lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of exported methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ...float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)
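	// For example, if T is a struct type declared (for illustration) as
	// struct{ A, B int }, then
	//
	//	f, ok := reflect.TypeOf(T{}).FieldByName("B")
	//	// ok == true, f.Index == []int{1}, f.Type == reflect.TypeOf(0)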
	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	common() *rtype
	uncommon() *uncommonType
}

// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.

/*
 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)
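// For example, callers typically switch on Kind before using kind-specific
// methods; here x stands for any value supplied by the caller:
//
//	switch t := reflect.TypeOf(x); t.Kind() {
//	case reflect.Slice, reflect.Array:
//		_ = t.Elem() // element type; valid for these kinds
//	case reflect.Struct:
//		_ = t.NumField() // field count; valid only for structs
//	}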
// tflag is used by an rtype to signal what extra type information is
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	runtime/type.go
type tflag uint8

const (
	// tflagUncommon means that there is a pointer, *uncommonType,
	// just beyond the outer type structure.
	//
	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
	// then t has uncommonType data and it can be accessed as:
	//
	//	type tUncommon struct {
	//		structType
	//		u uncommonType
	//	}
	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
	tflagUncommon tflag = 1 << 0

	// tflagExtraStar means the name in the str field has an
	// extraneous '*' prefix. This is because for most types T in
	// a program, the type *T also exists and reusing the str data
	// saves binary size.
	tflagExtraStar tflag = 1 << 1

	// tflagNamed means the type has a name.
	tflagNamed tflag = 1 << 2

	// tflagRegularMemory means that equal and hash functions can treat
	// this type as a single region of t.size bytes.
	tflagRegularMemory tflag = 1 << 3
)

// rtype is the common implementation of most values.
// It is embedded in other struct types.
//
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
	size       uintptr
	ptrdata    uintptr // number of bytes in the type that can contain pointers
	hash       uint32  // hash of type; avoids computation in hash tables
	tflag      tflag   // extra type information flags
	align      uint8   // alignment of variable with this type
	fieldAlign uint8   // alignment of struct field with this type
	kind       uint8   // enumeration for C
	// function for comparing objects of this type
	// (ptr to object A, ptr to object B) -> ==?
	equal     func(unsafe.Pointer, unsafe.Pointer) bool
	gcdata    *byte   // garbage collection data
	str       nameOff // string form
	ptrToThis typeOff // type for pointer to this type, may be zero
}

// Method on non-interface type
type method struct {
	name nameOff // name of method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call (one-word receiver)
	tfn  textOff // fn used for normal method call
}

// uncommonType is present only for defined types or types with methods
// (if T is a defined type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe a non-defined type with no methods.
type uncommonType struct {
	pkgPath nameOff // import path; empty for built-in types like int, string
	mcount  uint16  // number of methods
	xcount  uint16  // number of exported methods
	moff    uint32  // offset from this uncommontype to [mcount]method
	_       uint32  // unused
}

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir = 1 << iota // <-chan
	SendDir                     // chan<-
	BothDir = RecvDir | SendDir // chan
)

// arrayType represents a fixed array type.
type arrayType struct {
	rtype
	elem  *rtype // array element type
	slice *rtype // slice type
	len   uintptr
}

// chanType represents a channel type.
type chanType struct {
	rtype
	elem *rtype  // channel element type
	dir  uintptr // channel direction (ChanDir)
}

// funcType represents a function type.
//
// A *rtype for each in and out parameter is stored in an array that
// directly follows the funcType (and possibly its uncommonType). So
// a function type with one method, one input, and one output is:
//
//	struct {
//		funcType
//		uncommonType
//		[2]*rtype // [0] is in, [1] is out
//	}
type funcType struct {
	rtype
	inCount  uint16
	outCount uint16 // top bit is set if last input parameter is ...
}

// imethod represents a method on an interface type
type imethod struct {
	name nameOff // name of method
	typ  typeOff // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	rtype
	pkgPath name      // import path
	methods []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	rtype
	key    *rtype // map key type
	elem   *rtype // map element (value) type
	bucket *rtype // internal bucket structure
	// function for hashing keys (ptr to key, seed) -> hash
	hasher     func(unsafe.Pointer, uintptr) uintptr
	keysize    uint8  // size of key slot
	valuesize  uint8  // size of value slot
	bucketsize uint16 // size of bucket
	flags      uint32
}

// ptrType represents a pointer type.
type ptrType struct {
	rtype
	elem *rtype // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	rtype
	elem *rtype // slice element type
}

// Struct field
type structField struct {
	name        name    // name is always non-empty
	typ         *rtype  // type of field
	offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
}

func (f *structField) offset() uintptr {
	return f.offsetEmbed >> 1
}

func (f *structField) embedded() bool {
	return f.offsetEmbed&1 != 0
}
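// For example, an embedded field at byte offset 8 is stored with
// offsetEmbed == 8<<1|1 == 17, so offset() returns 8 and embedded() returns true.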
// structType represents a struct type.
type structType struct {
	rtype
	pkgPath name
	fields  []structField // sorted by offset
}

// name is an encoded type name with optional extra data.
//
// The first byte is a bit field containing:
//
//	1<<0 the name is exported
//	1<<1 tag data follows the name
//	1<<2 pkgPath nameOff follows the name and tag
//
// The next two bytes are the data length:
//
//	l := uint16(data[1])<<8 | uint16(data[2])
//
// Bytes [3:3+l] are the string data.
//
// If tag data follows then bytes 3+l and 3+l+1 are the tag length,
// with the data following.
//
// If the import path follows, then 4 bytes at the end of
// the data form a nameOff. The import path is only set for concrete
// methods that are defined in a different package than their type.
//
// If a name starts with "*", then the exported bit represents
// whether the pointed to type is exported.
type name struct {
	bytes *byte
}

func (n name) data(off int, whySafe string) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
}

func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

func (n name) nameLen() int {
	return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
}

func (n name) tagLen() int {
	if *n.data(0, "name flag field")&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
}

func (n name) name() (s string) {
	if n.bytes == nil {
		return
	}
	b := (*[4]byte)(unsafe.Pointer(n.bytes))

	hdr := (*stringHeader)(unsafe.Pointer(&s))
	hdr.Data = unsafe.Pointer(&b[3])
	hdr.Len = int(b[1])<<8 | int(b[2])
	return s
}

func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringHeader)(unsafe.Pointer(&s))
	hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
	hdr.Len = tl
	return s
}

func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff int32
	// Note that this field may not be aligned in memory,
	// so we cannot use a direct int32 assignment here.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
	pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
	return pkgPathName.name()
}

func newName(n, tag string, exported bool) name {
	if len(n) > 1<<16-1 {
		panic("reflect.nameFrom: name too long: " + n)
	}
	if len(tag) > 1<<16-1 {
		panic("reflect.nameFrom: tag too long: " + tag)
	}

	var bits byte
	l := 1 + 2 + len(n)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}

	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(n) >> 8)
	b[2] = uint8(len(n))
	copy(b[3:], n)
	if len(tag) > 0 {
		tb := b[3+len(n):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	return name{bytes: &b[0]}
}
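// For example, newName("x", `json:"x"`, true) produces the 14-byte encoding
//
//	b[0]       = 1<<0 | 1<<1 // exported, tag data follows
//	b[1], b[2] = 0, 1        // name length 1
//	b[3]       = 'x'
//	b[4], b[5] = 0, 8        // tag length 8
//	b[6:14]    = `json:"x"`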
/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */

// Method represents a single method.
type Method struct {
	// Name is the method name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindMask        = (1 << 5) - 1
)

// String returns the name of k.
func (k Kind) String() string {
	if int(k) < len(kindNames) {
		return kindNames[k]
	}
	return "kind" + strconv.Itoa(int(k))
}

var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

func (t *uncommonType) methods() []method {
	if t.mcount == 0 {
		return nil
	}
	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
}

func (t *uncommonType) exportedMethods() []method {
	if t.xcount == 0 {
		return nil
	}
	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
}

// resolveNameOff resolves a name offset from a base pointer.
// The (*rtype).nameOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

// resolveTypeOff resolves an *rtype offset from a base type.
// The (*rtype).typeOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// resolveTextOff resolves a function pointer offset from a base type.
// The (*rtype).textOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// addReflectOff adds a pointer to the reflection lookup map in the runtime.
// It returns a new ID that can be used as a typeOff or textOff, and will
// be resolved correctly. Implemented in the runtime package.
func addReflectOff(ptr unsafe.Pointer) int32

// resolveReflectName adds a name to the reflection lookup map in the runtime.
// It returns a new nameOff that can be used to refer to the pointer.
func resolveReflectName(n name) nameOff {
	return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
}

// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
// It returns a new typeOff that can be used to refer to the pointer.
func resolveReflectType(t *rtype) typeOff {
	return typeOff(addReflectOff(unsafe.Pointer(t)))
}

// resolveReflectText adds a function pointer to the reflection lookup map in
// the runtime. It returns a new textOff that can be used to refer to the
// pointer.
func resolveReflectText(ptr unsafe.Pointer) textOff {
	return textOff(addReflectOff(ptr))
}

type nameOff int32 // offset to a name
type typeOff int32 // offset to an *rtype
type textOff int32 // offset from top of text section

func (t *rtype) nameOff(off nameOff) name {
	return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
}

func (t *rtype) typeOff(off typeOff) *rtype {
	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
}

func (t *rtype) textOff(off textOff) unsafe.Pointer {
	return resolveTextOff(unsafe.Pointer(t), int32(off))
}

func (t *rtype) uncommon() *uncommonType {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.Kind() {
	case Struct:
		return &(*structTypeUncommon)(unsafe.Pointer(t)).u
	case Ptr:
		type u struct {
			ptrType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Func:
		type u struct {
			funcType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Slice:
		type u struct {
			sliceType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Array:
		type u struct {
			arrayType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Chan:
		type u struct {
			chanType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Map:
		type u struct {
			mapType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Interface:
		type u struct {
			interfaceType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			rtype
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

func (t *rtype) String() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t *rtype) Size() uintptr { return t.size }

func (t *rtype) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.size) * 8
}

func (t *rtype) Align() int { return int(t.align) }

func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }

func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }

func (t *rtype) pointers() bool { return t.ptrdata != 0 }

func (t *rtype) common() *rtype { return t }

func (t *rtype) exportedMethods() []method {
	ut := t.uncommon()
	if ut == nil {
		return nil
	}
	return ut.exportedMethods()
}

func (t *rtype) NumMethod() int {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	return len(t.exportedMethods())
}

func (t *rtype) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	methods := t.exportedMethods()
	if i < 0 || i >= len(methods) {
		panic("reflect: Method index out of range")
	}
	p := methods[i]
	pname := t.nameOff(p.name)
	m.Name = pname.name()
	fl := flag(Func)
	mtyp := t.typeOff(p.mtyp)
	ft := (*funcType)(unsafe.Pointer(mtyp))
	in := make([]Type, 0, 1+len(ft.in()))
	in = append(in, t)
	for _, arg := range ft.in() {
		in = append(in, arg)
	}
	out := make([]Type, 0, len(ft.out()))
	for _, ret := range ft.out() {
		out = append(out, ret)
	}
	mt := FuncOf(in, out, ft.IsVariadic())
	m.Type = mt
	tfn := t.textOff(p.tfn)
	fn := unsafe.Pointer(&tfn)
	m.Func = Value{mt.(*rtype), fn, fl}

	m.Index = i
	return m
}

func (t *rtype) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	ut := t.uncommon()
	if ut == nil {
		return Method{}, false
	}
	// TODO(mdempsky): Binary search.
	for i, p := range ut.exportedMethods() {
		if t.nameOff(p.name).name() == name {
			return t.Method(i), true
		}
	}
	return Method{}, false
}

func (t *rtype) PkgPath() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	ut := t.uncommon()
	if ut == nil {
		return ""
	}
	return t.nameOff(ut.pkgPath).name()
}

func (t *rtype) hasName() bool {
	return t.tflag&tflagNamed != 0
}

func (t *rtype) Name() string {
	if !t.hasName() {
		return ""
	}
	s := t.String()
	i := len(s) - 1
	for i >= 0 && s[i] != '.' {
		i--
	}
	return s[i+1:]
}

func (t *rtype) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type " + t.String())
	}
	tt := (*chanType)(unsafe.Pointer(t))
	return ChanDir(tt.dir)
}

func (t *rtype) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type " + t.String())
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return tt.outCount&(1<<15) != 0
}

func (t *rtype) Elem() Type {
	switch t.Kind() {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Chan:
		tt := (*chanType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Map:
		tt := (*mapType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Ptr:
		tt := (*ptrType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Slice:
		tt := (*sliceType)(unsafe.Pointer(t))
		return toType(tt.elem)
	}
	panic("reflect: Elem of invalid type " + t.String())
}

func (t *rtype) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

func (t *rtype) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

func (t *rtype) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

func (t *rtype) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type " + t.String())
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.in()[i])
}

func (t *rtype) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type " + t.String())
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.key)
}
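// For example, for a map type the Key and Elem accessors compose:
//
//	t := reflect.TypeOf(map[string][]int(nil))
//	t.Key()         // string
//	t.Elem()        // []int
//	t.Elem().Elem() // int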
func (t *rtype) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type " + t.String())
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.len)
}

func (t *rtype) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.fields)
}

func (t *rtype) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type " + t.String())
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return int(tt.inCount)
}

func (t *rtype) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type " + t.String())
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.out())
}

func (t *rtype) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type " + t.String())
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.out()[i])
}

func (t *funcType) in() []*rtype {
	uadd := unsafe.Sizeof(*t)
	if t.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommonType{})
	}
	if t.inCount == 0 {
		return nil
	}
	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
}

func (t *funcType) out() []*rtype {
	uadd := unsafe.Sizeof(*t)
	if t.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommonType{})
	}
	outCount := t.outCount & (1<<15 - 1)
	if outCount == 0 {
		return nil
	}
	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
}

// add returns p+x.
//
// The whySafe string is ignored, so that the function still inlines
// as efficiently as p+x, but all call sites should use the string to
// record why the addition is safe, which is to say why the addition
// does not cause x to advance to the very end of p's allocation
// and therefore point incorrectly at the next block in memory.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	pname := t.nameOff(p.name)
	m.Name = pname.name()
	if !pname.isExported() {
		m.PkgPath = pname.pkgPath()
		if m.PkgPath == "" {
			m.PkgPath = t.pkgPath.name()
		}
	}
	m.Type = toType(t.typeOff(p.typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's method set.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if t.nameOff(p.name).name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string
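// For example, if User is a struct type declared (for illustration) as
//
//	type User struct {
//		Name string `json:"name,omitempty" xml:"name"`
//	}
//
// then the tag of its Name field can be read as:
//
//	f, _ := reflect.TypeOf(User{}).FieldByName("Name")
//	f.Tag.Get("json")       // "name,omitempty"
//	f.Tag.Lookup("xml")     // "name", true
//	f.Tag.Lookup("missing") // "", false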
// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = p.name.name()
	f.Anonymous = p.embedded()
	if !p.name.isExported() {
		f.PkgPath = t.pkgPath.name()
	}
	if tag := p.name.tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for embedded field) type for field f.
				fname := f.name.name()
				var ntyp *rtype
				if f.embedded() {
					// Embedded field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without embedded fields.
	hasEmbeds := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if tf.name.name() == name {
				return t.Field(i), true
			}
			if tf.embedded() {
				hasEmbeds = true
			}
		}
	}
	if !hasEmbeds {
		return
	}
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}
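// For example:
//
//	reflect.TypeOf(3.14).Kind()          // reflect.Float64
//	reflect.TypeOf((*error)(nil)).Elem() // the interface type error
//	reflect.TypeOf(nil)                  // nil: the interface value holds no type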
// ptrMap is the cache for PtrTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}

func (t *rtype) ptrTo() *rtype {
	if t.ptrToThis != 0 {
		return t.typeOff(t.ptrToThis)
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p := (*ptrType)(unsafe.Pointer(tt))
		if p.elem != t {
			continue
		}
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).rtype
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.str = resolveReflectName(newName(s, "", false))
	pp.ptrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.hash = fnv1(t.hash, '*')

	pp.elem = t

	pi, _ := ptrMap.LoadOrStore(t, &pp)
	return &pi.(*ptrType).rtype
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*rtype), t)
}

func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*rtype)
	return directlyAssignable(uu, t) || implements(uu, t)
}

func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*rtype)
	return convertOp(uu, t) != nil
}

func (t *rtype) Comparable() bool {
	return t.equal != nil
}
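// For example, using the io.Writer interface and *bytes.Buffer, which
// implements it:
//
//	writer := reflect.TypeOf((*io.Writer)(nil)).Elem()
//	buf := reflect.TypeOf(&bytes.Buffer{})
//	buf.Implements(writer)   // true
//	buf.AssignableTo(writer) // true
//	reflect.TypeOf(int32(0)).ConvertibleTo(reflect.TypeOf(int64(0))) // true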
// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			tmName := t.nameOff(tm.name)
			vm := &v.methods[j]
			vmName := V.nameOff(vm.name)
			if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
				if !tmName.isExported() {
					tmPkgPath := tmName.pkgPath()
					if tmPkgPath == "" {
						tmPkgPath = t.pkgPath.name()
					}
					vmPkgPath := vmName.pkgPath()
					if vmPkgPath == "" {
						vmPkgPath = v.pkgPath.name()
					}
					if tmPkgPath != vmPkgPath {
						continue
					}
				}
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		return false
	}
	i := 0
	vmethods := v.methods()
	for j := 0; j < int(v.mcount); j++ {
		tm := &t.methods[i]
		tmName := t.nameOff(tm.name)
		vm := vmethods[j]
		vmName := V.nameOff(vm.name)
		if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
			if !tmName.isExported() {
				tmPkgPath := tmName.pkgPath()
				if tmPkgPath == "" {
					tmPkgPath = t.pkgPath.name()
				}
				vmPkgPath := vmName.pkgPath()
				if vmPkgPath == "" {
					vmPkgPath = V.nameOff(v.pkgPath).name()
				}
				if tmPkgPath != vmPkgPath {
					continue
				}
			}
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// specialChannelAssignability reports whether a value x of channel type V
// can be directly assigned (using memmove) to another channel type T.
// https://golang.org/doc/go_spec.html#Assignability
// T and V must be both of Chan kind.
func specialChannelAssignability(T, V *rtype) bool {
	// Special case:
	// x is a bidirectional channel value, T is a channel type,
	// x's type V and T have identical element types,
	// and at least one of V or T is not a defined type.
	return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must not be defined
	// and they must have the same kind.
	if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
		return false
	}

	if T.Kind() == Chan && specialChannelAssignability(T, V) {
		return true
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

func haveIdenticalType(T, V Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if T.Name() != V.Name() || T.Kind() != V.Kind() {
		return false
	}

	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
}

func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		if t.outCount != v.outCount || t.inCount != v.inCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		if t.pkgPath.name() != v.pkgPath.name() {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offsetEmbed != vf.offsetEmbed {
				return false
			}
		}
		return true
	}

	return false
}
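// The cmpTags parameter is what lets conversions ignore struct tags while
// assignability does not. For example, with two struct types that differ
// only in a field tag (illustrative declarations, not part of this file):
//
//	type A struct {
//		X int `a:"1"`
//	}
//	type B struct {
//		X int
//	}
//	reflect.TypeOf(A{}).ConvertibleTo(reflect.TypeOf(B{})) // true: tags ignored
//	reflect.TypeOf(A{}).AssignableTo(reflect.TypeOf(B{}))  // false: distinct defined types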
// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

func rtypeOff(section unsafe.Pointer, off int32) *rtype {
	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*rtype {
	sections, offset := typelinks()
	var ret []*rtype

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)/2 // avoid overflow when computing h
			// i ≤ h < j
			if !(rtypeOff(section, offs[h]).String() >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if typ.String() != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + typ.String()
	case RecvDir:
		s = "<-chan " + typ.String()
	case BothDir:
		s = "chan " + typ.String()
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.elem == typ && ch.dir == uintptr(dir) {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a channel type.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = tflagRegularMemory
	ch.dir = uintptr(dir)
	ch.str = resolveReflectName(newName(s, "", false))
	ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.elem = typ

	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
	return ti.(Type)
}

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if ktyp.equal == nil {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + ktyp.String() + "]" + etyp.String()
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.key == ktyp && mt.elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a map type.
	// Note: flag values must match those used in the TMAP case
	// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.str = resolveReflectName(newName(s, "", false))
	mt.tflag = 0
	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.key = ktyp
	mt.elem = etyp
	mt.bucket = bucketOf(ktyp, etyp)
	mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(ktyp, p, seed)
	}
	mt.flags = 0
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.flags |= 1 // indirect key
	} else {
		mt.keysize = uint8(ktyp.size)
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.flags |= 2 // indirect value
	} else {
		mt.valuesize = uint8(etyp.size)
	}
	mt.bucketsize = uint16(mt.bucket.size)
	if isReflexive(ktyp) {
		mt.flags |= 4
	}
	if needKeyUpdate(ktyp) {
		mt.flags |= 8
	}
	if hashMightPanic(ktyp) {
		mt.flags |= 16
	}
	mt.ptrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
	return ti.(Type)
}
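// For example, constructed types print like their source equivalents:
//
//	reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0)).String()  // "<-chan int"
//	reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)).String() // "map[string]int"
//	// MapOf(reflect.TypeOf(func() {}), reflect.TypeOf(0)) panics: func types are not comparable.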
1900 type funcTypeFixed4 struct { 1901 funcType 1902 args [4]*rtype 1903 } 1904 type funcTypeFixed8 struct { 1905 funcType 1906 args [8]*rtype 1907 } 1908 type funcTypeFixed16 struct { 1909 funcType 1910 args [16]*rtype 1911 } 1912 type funcTypeFixed32 struct { 1913 funcType 1914 args [32]*rtype 1915 } 1916 type funcTypeFixed64 struct { 1917 funcType 1918 args [64]*rtype 1919 } 1920 type funcTypeFixed128 struct { 1921 funcType 1922 args [128]*rtype 1923 } 1924 1925 // FuncOf returns the function type with the given argument and result types. 1926 // For example if k represents int and e represents string, 1927 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. 1928 // 1929 // The variadic argument controls whether the function is variadic. FuncOf 1930 // panics if the in[len(in)-1] does not represent a slice and variadic is 1931 // true. 1932 func FuncOf(in, out []Type, variadic bool) Type { 1933 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { 1934 panic("reflect.FuncOf: last arg of variadic func must be slice") 1935 } 1936 1937 // Make a func type. 1938 var ifunc interface{} = (func())(nil) 1939 prototype := *(**funcType)(unsafe.Pointer(&ifunc)) 1940 n := len(in) + len(out) 1941 1942 var ft *funcType 1943 var args []*rtype 1944 switch { 1945 case n <= 4: 1946 fixed := new(funcTypeFixed4) 1947 args = fixed.args[:0:len(fixed.args)] 1948 ft = &fixed.funcType 1949 case n <= 8: 1950 fixed := new(funcTypeFixed8) 1951 args = fixed.args[:0:len(fixed.args)] 1952 ft = &fixed.funcType 1953 case n <= 16: 1954 fixed := new(funcTypeFixed16) 1955 args = fixed.args[:0:len(fixed.args)] 1956 ft = &fixed.funcType 1957 case n <= 32: 1958 fixed := new(funcTypeFixed32) 1959 args = fixed.args[:0:len(fixed.args)] 1960 ft = &fixed.funcType 1961 case n <= 64: 1962 fixed := new(funcTypeFixed64) 1963 args = fixed.args[:0:len(fixed.args)] 1964 ft = &fixed.funcType 1965 case n <= 128: 1966 fixed := new(funcTypeFixed128) 1967 args = fixed.args[:0:len(fixed.args)] 1968 ft = &fixed.funcType 1969 default: 1970 panic("reflect.FuncOf: too many arguments") 1971 } 1972 *ft = *prototype 1973 1974 // Build a hash and minimally populate ft. 1975 var hash uint32 1976 for _, in := range in { 1977 t := in.(*rtype) 1978 args = append(args, t) 1979 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 1980 } 1981 if variadic { 1982 hash = fnv1(hash, 'v') 1983 } 1984 hash = fnv1(hash, '.') 1985 for _, out := range out { 1986 t := out.(*rtype) 1987 args = append(args, t) 1988 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash)) 1989 } 1990 if len(args) > 50 { 1991 panic("reflect.FuncOf does not support more than 50 arguments") 1992 } 1993 ft.tflag = 0 1994 ft.hash = hash 1995 ft.inCount = uint16(len(in)) 1996 ft.outCount = uint16(len(out)) 1997 if variadic { 1998 ft.outCount |= 1 << 15 1999 } 2000 2001 // Look in cache. 2002 if ts, ok := funcLookupCache.m.Load(hash); ok { 2003 for _, t := range ts.([]*rtype) { 2004 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2005 return t 2006 } 2007 } 2008 } 2009 2010 // Not in cache, lock and retry. 
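// Double-checked locking: the lock-free load above may race with a concurrent
// store, so re-check under funcLookupCache's mutex (which guards stores but
// not loads) before committing a new entry.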
2011 funcLookupCache.Lock() 2012 defer funcLookupCache.Unlock() 2013 if ts, ok := funcLookupCache.m.Load(hash); ok { 2014 for _, t := range ts.([]*rtype) { 2015 if haveIdenticalUnderlyingType(&ft.rtype, t, true) { 2016 return t 2017 } 2018 } 2019 } 2020 2021 addToCache := func(tt *rtype) Type { 2022 var rts []*rtype 2023 if rti, ok := funcLookupCache.m.Load(hash); ok { 2024 rts = rti.([]*rtype) 2025 } 2026 funcLookupCache.m.Store(hash, append(rts, tt)) 2027 return tt 2028 } 2029 2030 // Look in known types for the same string representation. 2031 str := funcStr(ft) 2032 for _, tt := range typesByString(str) { 2033 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) { 2034 return addToCache(tt) 2035 } 2036 } 2037 2038 // Populate the remaining fields of ft and store in cache. 2039 ft.str = resolveReflectName(newName(str, "", false)) 2040 ft.ptrToThis = 0 2041 return addToCache(&ft.rtype) 2042 } 2043 2044 // funcStr builds a string representation of a funcType. 2045 func funcStr(ft *funcType) string { 2046 repr := make([]byte, 0, 64) 2047 repr = append(repr, "func("...) 2048 for i, t := range ft.in() { 2049 if i > 0 { 2050 repr = append(repr, ", "...) 2051 } 2052 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2053 repr = append(repr, "..."...) 2054 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2055 } else { 2056 repr = append(repr, t.String()...) 2057 } 2058 } 2059 repr = append(repr, ')') 2060 out := ft.out() 2061 if len(out) == 1 { 2062 repr = append(repr, ' ') 2063 } else if len(out) > 1 { 2064 repr = append(repr, " ("...) 2065 } 2066 for i, t := range out { 2067 if i > 0 { 2068 repr = append(repr, ", "...) 2069 } 2070 repr = append(repr, t.String()...) 2071 } 2072 if len(out) > 1 { 2073 repr = append(repr, ')') 2074 } 2075 return string(repr) 2076 } 2077 2078 // isReflexive reports whether the == operation on the type is reflexive. 2079 // That is, x == x for all values x of type t. 2080 func isReflexive(t *rtype) bool { 2081 switch t.Kind() { 2082 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2083 return true 2084 case Float32, Float64, Complex64, Complex128, Interface: 2085 return false 2086 case Array: 2087 tt := (*arrayType)(unsafe.Pointer(t)) 2088 return isReflexive(tt.elem) 2089 case Struct: 2090 tt := (*structType)(unsafe.Pointer(t)) 2091 for _, f := range tt.fields { 2092 if !isReflexive(f.typ) { 2093 return false 2094 } 2095 } 2096 return true 2097 default: 2098 // Func, Map, Slice, Invalid 2099 panic("isReflexive called on non-key type " + t.String()) 2100 } 2101 } 2102 2103 // needKeyUpdate reports whether map overwrites require the key to be copied. 2104 func needKeyUpdate(t *rtype) bool { 2105 switch t.Kind() { 2106 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer: 2107 return false 2108 case Float32, Float64, Complex64, Complex128, Interface, String: 2109 // Float keys can be updated from +0 to -0. 2110 // String keys can be updated to use a smaller backing store. 2111 // Interfaces might have floats of strings in them. 
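// For example, once m[+0.0] exists, an assignment through the key -0.0 must
// rewrite the stored key bits even though +0.0 == -0.0.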
2112 return true 2113 case Array: 2114 tt := (*arrayType)(unsafe.Pointer(t)) 2115 return needKeyUpdate(tt.elem) 2116 case Struct: 2117 tt := (*structType)(unsafe.Pointer(t)) 2118 for _, f := range tt.fields { 2119 if needKeyUpdate(f.typ) { 2120 return true 2121 } 2122 } 2123 return false 2124 default: 2125 // Func, Map, Slice, Invalid 2126 panic("needKeyUpdate called on non-key type " + t.String()) 2127 } 2128 } 2129 2130 // hashMightPanic reports whether the hash of a map key of type t might panic. 2131 func hashMightPanic(t *rtype) bool { 2132 switch t.Kind() { 2133 case Interface: 2134 return true 2135 case Array: 2136 tt := (*arrayType)(unsafe.Pointer(t)) 2137 return hashMightPanic(tt.elem) 2138 case Struct: 2139 tt := (*structType)(unsafe.Pointer(t)) 2140 for _, f := range tt.fields { 2141 if hashMightPanic(f.typ) { 2142 return true 2143 } 2144 } 2145 return false 2146 default: 2147 return false 2148 } 2149 } 2150 2151 // Make sure these routines stay in sync with ../../runtime/map.go! 2152 // These types exist only for GC, so we only fill out GC relevant info. 2153 // Currently, that's just size and the GC program. We also fill in string 2154 // for possible debugging use. 2155 const ( 2156 bucketSize uintptr = 8 2157 maxKeySize uintptr = 128 2158 maxValSize uintptr = 128 2159 ) 2160 2161 func bucketOf(ktyp, etyp *rtype) *rtype { 2162 if ktyp.size > maxKeySize { 2163 ktyp = PtrTo(ktyp).(*rtype) 2164 } 2165 if etyp.size > maxValSize { 2166 etyp = PtrTo(etyp).(*rtype) 2167 } 2168 2169 // Prepare GC data if any. 2170 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes, 2171 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap. 2172 // Note that since the key and value are known to be <= 128 bytes, 2173 // they're guaranteed to have bitmaps instead of GC programs. 2174 var gcdata *byte 2175 var ptrdata uintptr 2176 var overflowPad uintptr 2177 2178 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize 2179 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 { 2180 panic("reflect: bad size computation in MapOf") 2181 } 2182 2183 if ktyp.ptrdata != 0 || etyp.ptrdata != 0 { 2184 nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize 2185 mask := make([]byte, (nptr+7)/8) 2186 base := bucketSize / ptrSize 2187 2188 if ktyp.ptrdata != 0 { 2189 emitGCMask(mask, base, ktyp, bucketSize) 2190 } 2191 base += bucketSize * ktyp.size / ptrSize 2192 2193 if etyp.ptrdata != 0 { 2194 emitGCMask(mask, base, etyp, bucketSize) 2195 } 2196 base += bucketSize * etyp.size / ptrSize 2197 base += overflowPad / ptrSize 2198 2199 word := base 2200 mask[word/8] |= 1 << (word % 8) 2201 gcdata = &mask[0] 2202 ptrdata = (word + 1) * ptrSize 2203 2204 // overflow word must be last 2205 if ptrdata != size { 2206 panic("reflect: bad layout computation in MapOf") 2207 } 2208 } 2209 2210 b := &rtype{ 2211 align: ptrSize, 2212 size: size, 2213 kind: uint8(Struct), 2214 ptrdata: ptrdata, 2215 gcdata: gcdata, 2216 } 2217 if overflowPad > 0 { 2218 b.align = 8 2219 } 2220 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")" 2221 b.str = resolveReflectName(newName(s, "", false)) 2222 return b 2223 } 2224 2225 func (t *rtype) gcSlice(begin, end uintptr) []byte { 2226 return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end] 2227 } 2228 2229 // emitGCMask writes the GC mask for [n]typ into out, starting at bit 2230 // offset base. 
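// For illustration, assuming a 3-word element whose middle word is its only
// pointer (element mask 010), a call with n == 2 sets bits base+1 and base+4
// of out: one copy of the element mask per array slot.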
2231 func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) { 2232 if typ.kind&kindGCProg != 0 { 2233 panic("reflect: unexpected GC program") 2234 } 2235 ptrs := typ.ptrdata / ptrSize 2236 words := typ.size / ptrSize 2237 mask := typ.gcSlice(0, (ptrs+7)/8) 2238 for j := uintptr(0); j < ptrs; j++ { 2239 if (mask[j/8]>>(j%8))&1 != 0 { 2240 for i := uintptr(0); i < n; i++ { 2241 k := base + i*words + j 2242 out[k/8] |= 1 << (k % 8) 2243 } 2244 } 2245 } 2246 } 2247 2248 // appendGCProg appends the GC program for the first ptrdata bytes of 2249 // typ to dst and returns the extended slice. 2250 func appendGCProg(dst []byte, typ *rtype) []byte { 2251 if typ.kind&kindGCProg != 0 { 2252 // Element has GC program; emit one element. 2253 n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata))) 2254 prog := typ.gcSlice(4, 4+n-1) 2255 return append(dst, prog...) 2256 } 2257 2258 // Element is small with pointer mask; use as literal bits. 2259 ptrs := typ.ptrdata / ptrSize 2260 mask := typ.gcSlice(0, (ptrs+7)/8) 2261 2262 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2263 for ; ptrs > 120; ptrs -= 120 { 2264 dst = append(dst, 120) 2265 dst = append(dst, mask[:15]...) 2266 mask = mask[15:] 2267 } 2268 2269 dst = append(dst, byte(ptrs)) 2270 dst = append(dst, mask...) 2271 return dst 2272 } 2273 2274 // SliceOf returns the slice type with element type t. 2275 // For example, if t represents int, SliceOf(t) represents []int. 2276 func SliceOf(t Type) Type { 2277 typ := t.(*rtype) 2278 2279 // Look in cache. 2280 ckey := cacheKey{Slice, typ, nil, 0} 2281 if slice, ok := lookupCache.Load(ckey); ok { 2282 return slice.(Type) 2283 } 2284 2285 // Look in known types. 2286 s := "[]" + typ.String() 2287 for _, tt := range typesByString(s) { 2288 slice := (*sliceType)(unsafe.Pointer(tt)) 2289 if slice.elem == typ { 2290 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2291 return ti.(Type) 2292 } 2293 } 2294 2295 // Make a slice type. 2296 var islice interface{} = ([]unsafe.Pointer)(nil) 2297 prototype := *(**sliceType)(unsafe.Pointer(&islice)) 2298 slice := *prototype 2299 slice.tflag = 0 2300 slice.str = resolveReflectName(newName(s, "", false)) 2301 slice.hash = fnv1(typ.hash, '[') 2302 slice.elem = typ 2303 slice.ptrToThis = 0 2304 2305 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype) 2306 return ti.(Type) 2307 } 2308 2309 // The structLookupCache caches StructOf lookups. 2310 // StructOf does not share the common lookupCache since we need to pin 2311 // the memory associated with *structTypeFixedN. 2312 var structLookupCache struct { 2313 sync.Mutex // Guards stores (but not loads) on m. 2314 2315 // m is a map[uint32][]Type keyed by the hash calculated in StructOf. 2316 // Elements in m are append-only and thus safe for concurrent reading. 2317 m sync.Map 2318 } 2319 2320 type structTypeUncommon struct { 2321 structType 2322 u uncommonType 2323 } 2324 2325 // isLetter reports whether a given 'rune' is classified as a Letter. 2326 func isLetter(ch rune) bool { 2327 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) 2328 } 2329 2330 // isValidFieldName checks if a string is a valid (struct) field name or not. 2331 // 2332 // According to the language spec, a field name should be an identifier. 2333 // 2334 // identifier = letter { letter | unicode_digit } . 2335 // letter = unicode_letter | "_" . 
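// For example, "Name", "_x9", and "Σ" are accepted below, while "" and "1x"
// (leading digit) are rejected.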
2336 func isValidFieldName(fieldName string) bool { 2337 for i, c := range fieldName { 2338 if i == 0 && !isLetter(c) { 2339 return false 2340 } 2341 2342 if !(isLetter(c) || unicode.IsDigit(c)) { 2343 return false 2344 } 2345 } 2346 2347 return len(fieldName) > 0 2348 } 2349 2350 // StructOf returns the struct type containing fields. 2351 // The Offset and Index fields are ignored and computed as they would be 2352 // by the compiler. 2353 // 2354 // StructOf currently does not generate wrapper methods for embedded 2355 // fields and panics if passed unexported StructFields. 2356 // These limitations may be lifted in a future version. 2357 func StructOf(fields []StructField) Type { 2358 var ( 2359 hash = fnv1(0, []byte("struct {")...) 2360 size uintptr 2361 typalign uint8 2362 comparable = true 2363 methods []method 2364 2365 fs = make([]structField, len(fields)) 2366 repr = make([]byte, 0, 64) 2367 fset = map[string]struct{}{} // fields' names 2368 2369 hasGCProg = false // records whether a struct-field type has a GCProg 2370 ) 2371 2372 lastzero := uintptr(0) 2373 repr = append(repr, "struct {"...) 2374 pkgpath := "" 2375 for i, field := range fields { 2376 if field.Name == "" { 2377 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2378 } 2379 if !isValidFieldName(field.Name) { 2380 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2381 } 2382 if field.Type == nil { 2383 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2384 } 2385 f, fpkgpath := runtimeStructField(field) 2386 ft := f.typ 2387 if ft.kind&kindGCProg != 0 { 2388 hasGCProg = true 2389 } 2390 if fpkgpath != "" { 2391 if pkgpath == "" { 2392 pkgpath = fpkgpath 2393 } else if pkgpath != fpkgpath { 2394 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) 2395 } 2396 } 2397 2398 // Update string and hash 2399 name := f.name.name() 2400 hash = fnv1(hash, []byte(name)...) 2401 repr = append(repr, (" " + name)...) 2402 if f.embedded() { 2403 // Embedded field 2404 if f.typ.Kind() == Ptr { 2405 // Embedded ** and *interface{} are illegal 2406 elem := ft.Elem() 2407 if k := elem.Kind(); k == Ptr || k == Interface { 2408 panic("reflect.StructOf: illegal embedded field type " + ft.String()) 2409 } 2410 } 2411 2412 switch f.typ.Kind() { 2413 case Interface: 2414 ift := (*interfaceType)(unsafe.Pointer(ft)) 2415 for im, m := range ift.methods { 2416 if ift.nameOff(m.name).pkgPath() != "" { 2417 // TODO(sbinet). Issue 15924. 
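// An unexported interface method has a non-empty pkgPath, and generating
// wrappers for unexported methods is not supported, hence the panic below.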
2418 panic("reflect: embedded interface with unexported method(s) not implemented") 2419 } 2420 2421 var ( 2422 mtyp = ift.typeOff(m.typ) 2423 ifield = i 2424 imethod = im 2425 ifn Value 2426 tfn Value 2427 ) 2428 2429 if ft.kind&kindDirectIface != 0 { 2430 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2431 var args []Value 2432 var recv = in[0] 2433 if len(in) > 1 { 2434 args = in[1:] 2435 } 2436 return recv.Field(ifield).Method(imethod).Call(args) 2437 }) 2438 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2439 var args []Value 2440 var recv = in[0] 2441 if len(in) > 1 { 2442 args = in[1:] 2443 } 2444 return recv.Field(ifield).Method(imethod).Call(args) 2445 }) 2446 } else { 2447 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2448 var args []Value 2449 var recv = in[0] 2450 if len(in) > 1 { 2451 args = in[1:] 2452 } 2453 return recv.Field(ifield).Method(imethod).Call(args) 2454 }) 2455 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2456 var args []Value 2457 var recv = Indirect(in[0]) 2458 if len(in) > 1 { 2459 args = in[1:] 2460 } 2461 return recv.Field(ifield).Method(imethod).Call(args) 2462 }) 2463 } 2464 2465 methods = append(methods, method{ 2466 name: resolveReflectName(ift.nameOff(m.name)), 2467 mtyp: resolveReflectType(mtyp), 2468 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2469 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2470 }) 2471 } 2472 case Ptr: 2473 ptr := (*ptrType)(unsafe.Pointer(ft)) 2474 if unt := ptr.uncommon(); unt != nil { 2475 if i > 0 && unt.mcount > 0 { 2476 // Issue 15924. 2477 panic("reflect: embedded type with methods not implemented if type is not first field") 2478 } 2479 if len(fields) > 1 { 2480 panic("reflect: embedded type with methods not implemented if there is more than one field") 2481 } 2482 for _, m := range unt.methods() { 2483 mname := ptr.nameOff(m.name) 2484 if mname.pkgPath() != "" { 2485 // TODO(sbinet). 2486 // Issue 15924. 2487 panic("reflect: embedded interface with unexported method(s) not implemented") 2488 } 2489 methods = append(methods, method{ 2490 name: resolveReflectName(mname), 2491 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), 2492 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2493 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2494 }) 2495 } 2496 } 2497 if unt := ptr.elem.uncommon(); unt != nil { 2498 for _, m := range unt.methods() { 2499 mname := ptr.nameOff(m.name) 2500 if mname.pkgPath() != "" { 2501 // TODO(sbinet) 2502 // Issue 15924. 2503 panic("reflect: embedded interface with unexported method(s) not implemented") 2504 } 2505 methods = append(methods, method{ 2506 name: resolveReflectName(mname), 2507 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2508 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2509 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2510 }) 2511 } 2512 } 2513 default: 2514 if unt := ft.uncommon(); unt != nil { 2515 if i > 0 && unt.mcount > 0 { 2516 // Issue 15924. 2517 panic("reflect: embedded type with methods not implemented if type is not first field") 2518 } 2519 if len(fields) > 1 && ft.kind&kindDirectIface != 0 { 2520 panic("reflect: embedded type with methods not implemented for non-pointer type") 2521 } 2522 for _, m := range unt.methods() { 2523 mname := ft.nameOff(m.name) 2524 if mname.pkgPath() != "" { 2525 // TODO(sbinet) 2526 // Issue 15924. 
2527 panic("reflect: embedded interface with unexported method(s) not implemented") 2528 } 2529 methods = append(methods, method{ 2530 name: resolveReflectName(mname), 2531 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2532 ifn: resolveReflectText(ft.textOff(m.ifn)), 2533 tfn: resolveReflectText(ft.textOff(m.tfn)), 2534 }) 2535 2536 } 2537 } 2538 } 2539 } 2540 if _, dup := fset[name]; dup { 2541 panic("reflect.StructOf: duplicate field " + name) 2542 } 2543 fset[name] = struct{}{} 2544 2545 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2546 2547 repr = append(repr, (" " + ft.String())...) 2548 if f.name.tagLen() > 0 { 2549 hash = fnv1(hash, []byte(f.name.tag())...) 2550 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 2551 } 2552 if i < len(fields)-1 { 2553 repr = append(repr, ';') 2554 } 2555 2556 comparable = comparable && (ft.equal != nil) 2557 2558 offset := align(size, uintptr(ft.align)) 2559 if ft.align > typalign { 2560 typalign = ft.align 2561 } 2562 size = offset + ft.size 2563 f.offsetEmbed |= offset << 1 2564 2565 if ft.size == 0 { 2566 lastzero = size 2567 } 2568 2569 fs[i] = f 2570 } 2571 2572 if size > 0 && lastzero == size { 2573 // This is a non-zero sized struct that ends in a 2574 // zero-sized field. We add an extra byte of padding, 2575 // to ensure that taking the address of the final 2576 // zero-sized field can't manufacture a pointer to the 2577 // next object in the heap. See issue 9401. 2578 size++ 2579 } 2580 2581 var typ *structType 2582 var ut *uncommonType 2583 2584 if len(methods) == 0 { 2585 t := new(structTypeUncommon) 2586 typ = &t.structType 2587 ut = &t.u 2588 } else { 2589 // A *rtype representing a struct is followed directly in memory by an 2590 // array of method objects representing the methods attached to the 2591 // struct. To get the same layout for a run time generated type, we 2592 // need an array directly following the uncommonType memory. 2593 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2594 tt := New(StructOf([]StructField{ 2595 {Name: "S", Type: TypeOf(structType{})}, 2596 {Name: "U", Type: TypeOf(uncommonType{})}, 2597 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, 2598 })) 2599 2600 typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr())) 2601 ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr())) 2602 2603 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods) 2604 } 2605 // TODO(sbinet): Once we allow embedding multiple types, 2606 // methods will need to be sorted like the compiler does. 2607 // TODO(sbinet): Once we allow non-exported methods, we will 2608 // need to compute xcount as the number of exported methods. 2609 ut.mcount = uint16(len(methods)) 2610 ut.xcount = ut.mcount 2611 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2612 2613 if len(fs) > 0 { 2614 repr = append(repr, ' ') 2615 } 2616 repr = append(repr, '}') 2617 hash = fnv1(hash, '}') 2618 str := string(repr) 2619 2620 // Round the size up to be a multiple of the alignment. 2621 size = align(size, uintptr(typalign)) 2622 2623 // Make the struct type. 2624 var istruct interface{} = struct{}{} 2625 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2626 *typ = *prototype 2627 typ.fields = fs 2628 if pkgpath != "" { 2629 typ.pkgPath = newName(pkgpath, "", false) 2630 } 2631 2632 // Look in cache. 
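// (The cache key is the FNV-1 hash accumulated above from the field names,
// tags, and field type hashes; collisions are resolved by
// haveIdenticalUnderlyingType below.)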
2633 if ts, ok := structLookupCache.m.Load(hash); ok { 2634 for _, st := range ts.([]Type) { 2635 t := st.common() 2636 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2637 return t 2638 } 2639 } 2640 } 2641 2642 // Not in cache, lock and retry. 2643 structLookupCache.Lock() 2644 defer structLookupCache.Unlock() 2645 if ts, ok := structLookupCache.m.Load(hash); ok { 2646 for _, st := range ts.([]Type) { 2647 t := st.common() 2648 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2649 return t 2650 } 2651 } 2652 } 2653 2654 addToCache := func(t Type) Type { 2655 var ts []Type 2656 if ti, ok := structLookupCache.m.Load(hash); ok { 2657 ts = ti.([]Type) 2658 } 2659 structLookupCache.m.Store(hash, append(ts, t)) 2660 return t 2661 } 2662 2663 // Look in known types. 2664 for _, t := range typesByString(str) { 2665 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2666 // even if 't' wasn't a structType with methods, we should be ok 2667 // as the 'u uncommonType' field won't be accessed except when 2668 // tflag&tflagUncommon is set. 2669 return addToCache(t) 2670 } 2671 } 2672 2673 typ.str = resolveReflectName(newName(str, "", false)) 2674 typ.tflag = 0 // TODO: set tflagRegularMemory 2675 typ.hash = hash 2676 typ.size = size 2677 typ.ptrdata = typeptrdata(typ.common()) 2678 typ.align = typalign 2679 typ.fieldAlign = typalign 2680 typ.ptrToThis = 0 2681 if len(methods) > 0 { 2682 typ.tflag |= tflagUncommon 2683 } 2684 2685 if hasGCProg { 2686 lastPtrField := 0 2687 for i, ft := range fs { 2688 if ft.typ.pointers() { 2689 lastPtrField = i 2690 } 2691 } 2692 prog := []byte{0, 0, 0, 0} // will be length of prog 2693 var off uintptr 2694 for i, ft := range fs { 2695 if i > lastPtrField { 2696 // gcprog should not include anything for any field after 2697 // the last field that contains pointer data 2698 break 2699 } 2700 if !ft.typ.pointers() { 2701 // Ignore pointerless fields. 2702 continue 2703 } 2704 // Pad to start of this field with zeros. 2705 if ft.offset() > off { 2706 n := (ft.offset() - off) / ptrSize 2707 prog = append(prog, 0x01, 0x00) // emit a 0 bit 2708 if n > 1 { 2709 prog = append(prog, 0x81) // repeat previous bit 2710 prog = appendVarint(prog, n-1) // n-1 times 2711 } 2712 off = ft.offset() 2713 } 2714 2715 prog = appendGCProg(prog, ft.typ) 2716 off += ft.typ.ptrdata 2717 } 2718 prog = append(prog, 0) 2719 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2720 typ.kind |= kindGCProg 2721 typ.gcdata = &prog[0] 2722 } else { 2723 typ.kind &^= kindGCProg 2724 bv := new(bitVector) 2725 addTypeBits(bv, 0, typ.common()) 2726 if len(bv.data) > 0 { 2727 typ.gcdata = &bv.data[0] 2728 } 2729 } 2730 typ.equal = nil 2731 if comparable { 2732 typ.equal = func(p, q unsafe.Pointer) bool { 2733 for _, ft := range typ.fields { 2734 pi := add(p, ft.offset(), "&x.field safe") 2735 qi := add(q, ft.offset(), "&x.field safe") 2736 if !ft.typ.equal(pi, qi) { 2737 return false 2738 } 2739 } 2740 return true 2741 } 2742 } 2743 2744 switch { 2745 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2746 // structs of 1 direct iface type can be direct 2747 typ.kind |= kindDirectIface 2748 default: 2749 typ.kind &^= kindDirectIface 2750 } 2751 2752 return addToCache(&typ.rtype) 2753 } 2754 2755 // runtimeStructField takes a StructField value passed to StructOf and 2756 // returns both the corresponding internal representation, of type 2757 // structField, and the pkgpath value to use for this field. 
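// For illustration, a hypothetical StructField{Name: "ID", Type: TypeOf(0),
// Tag: `json:"id"`} produces a structField whose name carries both "ID" and
// the tag, with an empty pkgpath since the field is exported.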
2758 func runtimeStructField(field StructField) (structField, string) { 2759 if field.Anonymous && field.PkgPath != "" { 2760 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") 2761 } 2762 2763 exported := field.PkgPath == "" 2764 if exported { 2765 // Best-effort check for misuse. 2766 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. 2767 c := field.Name[0] 2768 if 'a' <= c && c <= 'z' || c == '_' { 2769 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2770 } 2771 } 2772 2773 offsetEmbed := uintptr(0) 2774 if field.Anonymous { 2775 offsetEmbed |= 1 2776 } 2777 2778 resolveReflectType(field.Type.common()) // install in runtime 2779 f := structField{ 2780 name: newName(field.Name, string(field.Tag), exported), 2781 typ: field.Type.common(), 2782 offsetEmbed: offsetEmbed, 2783 } 2784 return f, field.PkgPath 2785 } 2786 2787 // typeptrdata returns the length in bytes of the prefix of t 2788 // containing pointer data. Anything after this offset is scalar data. 2789 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2790 func typeptrdata(t *rtype) uintptr { 2791 switch t.Kind() { 2792 case Struct: 2793 st := (*structType)(unsafe.Pointer(t)) 2794 // find the last field that has pointers. 2795 field := -1 2796 for i := range st.fields { 2797 ft := st.fields[i].typ 2798 if ft.pointers() { 2799 field = i 2800 } 2801 } 2802 if field == -1 { 2803 return 0 2804 } 2805 f := st.fields[field] 2806 return f.offset() + f.typ.ptrdata 2807 2808 default: 2809 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2810 } 2811 } 2812 2813 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2814 const maxPtrmaskBytes = 2048 2815 2816 // ArrayOf returns the array type with the given count and element type. 2817 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2818 // 2819 // If the resulting type would be larger than the available address space, 2820 // ArrayOf panics. 2821 func ArrayOf(count int, elem Type) Type { 2822 typ := elem.(*rtype) 2823 2824 // Look in cache. 2825 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2826 if array, ok := lookupCache.Load(ckey); ok { 2827 return array.(Type) 2828 } 2829 2830 // Look in known types. 2831 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2832 for _, tt := range typesByString(s) { 2833 array := (*arrayType)(unsafe.Pointer(tt)) 2834 if array.elem == typ { 2835 ti, _ := lookupCache.LoadOrStore(ckey, tt) 2836 return ti.(Type) 2837 } 2838 } 2839 2840 // Make an array type. 
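// (The prototype below is the *arrayType behind [1]unsafe.Pointer; its fields
// are then rewritten for the requested element type and count.)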
2841 var iarray interface{} = [1]unsafe.Pointer{} 2842 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2843 array := *prototype 2844 array.tflag = typ.tflag & tflagRegularMemory 2845 array.str = resolveReflectName(newName(s, "", false)) 2846 array.hash = fnv1(typ.hash, '[') 2847 for n := uint32(count); n > 0; n >>= 8 { 2848 array.hash = fnv1(array.hash, byte(n)) 2849 } 2850 array.hash = fnv1(array.hash, ']') 2851 array.elem = typ 2852 array.ptrToThis = 0 2853 if typ.size > 0 { 2854 max := ^uintptr(0) / typ.size 2855 if uintptr(count) > max { 2856 panic("reflect.ArrayOf: array size would exceed virtual address space") 2857 } 2858 } 2859 array.size = typ.size * uintptr(count) 2860 if count > 0 && typ.ptrdata != 0 { 2861 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2862 } 2863 array.align = typ.align 2864 array.fieldAlign = typ.fieldAlign 2865 array.len = uintptr(count) 2866 array.slice = SliceOf(elem).(*rtype) 2867 2868 switch { 2869 case typ.ptrdata == 0 || array.size == 0: 2870 // No pointers. 2871 array.gcdata = nil 2872 array.ptrdata = 0 2873 2874 case count == 1: 2875 // In memory, 1-element array looks just like the element. 2876 array.kind |= typ.kind & kindGCProg 2877 array.gcdata = typ.gcdata 2878 array.ptrdata = typ.ptrdata 2879 2880 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2881 // Element is small with pointer mask; array is still small. 2882 // Create direct pointer mask by turning each 1 bit in elem 2883 // into count 1 bits in larger mask. 2884 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2885 emitGCMask(mask, 0, typ, array.len) 2886 array.gcdata = &mask[0] 2887 2888 default: 2889 // Create program that emits one element 2890 // and then repeats to make the array. 2891 prog := []byte{0, 0, 0, 0} // will be length of prog 2892 prog = appendGCProg(prog, typ) 2893 // Pad from ptrdata to size. 2894 elemPtrs := typ.ptrdata / ptrSize 2895 elemWords := typ.size / ptrSize 2896 if elemPtrs < elemWords { 2897 // Emit literal 0 bit, then repeat as needed. 2898 prog = append(prog, 0x01, 0x00) 2899 if elemPtrs+1 < elemWords { 2900 prog = append(prog, 0x81) 2901 prog = appendVarint(prog, elemWords-elemPtrs-1) 2902 } 2903 } 2904 // Repeat count-1 times. 
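// (GC program encoding: a repeat instruction whose operand is the element
// size in words, followed by a varint count-1, i.e. "repeat the previous
// element's bits count-1 more times".)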
2905 if elemWords < 0x80 { 2906 prog = append(prog, byte(elemWords|0x80)) 2907 } else { 2908 prog = append(prog, 0x80) 2909 prog = appendVarint(prog, elemWords) 2910 } 2911 prog = appendVarint(prog, uintptr(count)-1) 2912 prog = append(prog, 0) 2913 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2914 array.kind |= kindGCProg 2915 array.gcdata = &prog[0] 2916 array.ptrdata = array.size // overestimate but ok; must match program 2917 } 2918 2919 etyp := typ.common() 2920 esize := etyp.Size() 2921 2922 array.equal = nil 2923 if eequal := etyp.equal; eequal != nil { 2924 array.equal = func(p, q unsafe.Pointer) bool { 2925 for i := 0; i < count; i++ { 2926 pi := arrayAt(p, i, esize, "i < count") 2927 qi := arrayAt(q, i, esize, "i < count") 2928 if !eequal(pi, qi) { 2929 return false 2930 } 2931 2932 } 2933 return true 2934 } 2935 } 2936 2937 switch { 2938 case count == 1 && !ifaceIndir(typ): 2939 // array of 1 direct iface type can be direct 2940 array.kind |= kindDirectIface 2941 default: 2942 array.kind &^= kindDirectIface 2943 } 2944 2945 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype) 2946 return ti.(Type) 2947 } 2948 2949 func appendVarint(x []byte, v uintptr) []byte { 2950 for ; v >= 0x80; v >>= 7 { 2951 x = append(x, byte(v|0x80)) 2952 } 2953 x = append(x, byte(v)) 2954 return x 2955 } 2956 2957 // toType converts from a *rtype to a Type that can be returned 2958 // to the client of package reflect. In gc, the only concern is that 2959 // a nil *rtype must be replaced by a nil Type, but in gccgo this 2960 // function takes care of ensuring that multiple *rtype for the same 2961 // type are coalesced into a single Type. 2962 func toType(t *rtype) Type { 2963 if t == nil { 2964 return nil 2965 } 2966 return t 2967 } 2968 2969 type layoutKey struct { 2970 ftyp *funcType // function signature 2971 rcvr *rtype // receiver type, or nil if none 2972 } 2973 2974 type layoutType struct { 2975 t *rtype 2976 argSize uintptr // size of arguments 2977 retOffset uintptr // offset of return values. 2978 stack *bitVector 2979 framePool *sync.Pool 2980 } 2981 2982 var layoutCache sync.Map // map[layoutKey]layoutType 2983 2984 // funcLayout computes a struct type representing the layout of the 2985 // function arguments and return values for the function type t. 2986 // If rcvr != nil, rcvr specifies the type of the receiver. 2987 // The returned type exists only for GC, so we only fill out GC relevant info. 2988 // Currently, that's just size and the GC program. We also fill in 2989 // the name for possible debugging use. 2990 func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { 2991 if t.Kind() != Func { 2992 panic("reflect: funcLayout of non-func type " + t.String()) 2993 } 2994 if rcvr != nil && rcvr.Kind() == Interface { 2995 panic("reflect: funcLayout with interface receiver " + rcvr.String()) 2996 } 2997 k := layoutKey{t, rcvr} 2998 if lti, ok := layoutCache.Load(k); ok { 2999 lt := lti.(layoutType) 3000 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool 3001 } 3002 3003 // compute gc program & stack bitmap for arguments 3004 ptrmap := new(bitVector) 3005 var offset uintptr 3006 if rcvr != nil { 3007 // Reflect uses the "interface" calling convention for 3008 // methods, where receivers take one word of argument 3009 // space no matter how big they actually are. 
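// That single word is a pointer when the receiver is stored indirectly (the
// word points at the receiver) or is itself pointer-shaped, so the stack
// bitmap records a 1 bit; otherwise a 0 bit.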
3010 if ifaceIndir(rcvr) || rcvr.pointers() { 3011 ptrmap.append(1) 3012 } else { 3013 ptrmap.append(0) 3014 } 3015 offset += ptrSize 3016 } 3017 for _, arg := range t.in() { 3018 offset += -offset & uintptr(arg.align-1) 3019 addTypeBits(ptrmap, offset, arg) 3020 offset += arg.size 3021 } 3022 argSize = offset 3023 offset += -offset & (ptrSize - 1) 3024 retOffset = offset 3025 for _, res := range t.out() { 3026 offset += -offset & uintptr(res.align-1) 3027 addTypeBits(ptrmap, offset, res) 3028 offset += res.size 3029 } 3030 offset += -offset & (ptrSize - 1) 3031 3032 // build dummy rtype holding gc program 3033 x := &rtype{ 3034 align: ptrSize, 3035 size: offset, 3036 ptrdata: uintptr(ptrmap.n) * ptrSize, 3037 } 3038 if ptrmap.n > 0 { 3039 x.gcdata = &ptrmap.data[0] 3040 } 3041 3042 var s string 3043 if rcvr != nil { 3044 s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")" 3045 } else { 3046 s = "funcargs(" + t.String() + ")" 3047 } 3048 x.str = resolveReflectName(newName(s, "", false)) 3049 3050 // cache result for future callers 3051 framePool = &sync.Pool{New: func() interface{} { 3052 return unsafe_New(x) 3053 }} 3054 lti, _ := layoutCache.LoadOrStore(k, layoutType{ 3055 t: x, 3056 argSize: argSize, 3057 retOffset: retOffset, 3058 stack: ptrmap, 3059 framePool: framePool, 3060 }) 3061 lt := lti.(layoutType) 3062 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool 3063 } 3064 3065 // ifaceIndir reports whether t is stored indirectly in an interface value. 3066 func ifaceIndir(t *rtype) bool { 3067 return t.kind&kindDirectIface == 0 3068 } 3069 3070 type bitVector struct { 3071 n uint32 // number of bits 3072 data []byte 3073 } 3074 3075 // append a bit to the bitmap. 3076 func (bv *bitVector) append(bit uint8) { 3077 if bv.n%8 == 0 { 3078 bv.data = append(bv.data, 0) 3079 } 3080 bv.data[bv.n/8] |= bit << (bv.n % 8) 3081 bv.n++ 3082 } 3083 3084 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3085 if t.ptrdata == 0 { 3086 return 3087 } 3088 3089 switch Kind(t.kind & kindMask) { 3090 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: 3091 // 1 pointer at start of representation 3092 for bv.n < uint32(offset/uintptr(ptrSize)) { 3093 bv.append(0) 3094 } 3095 bv.append(1) 3096 3097 case Interface: 3098 // 2 pointers 3099 for bv.n < uint32(offset/uintptr(ptrSize)) { 3100 bv.append(0) 3101 } 3102 bv.append(1) 3103 bv.append(1) 3104 3105 case Array: 3106 // repeat inner type 3107 tt := (*arrayType)(unsafe.Pointer(t)) 3108 for i := 0; i < int(tt.len); i++ { 3109 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3110 } 3111 3112 case Struct: 3113 // apply fields 3114 tt := (*structType)(unsafe.Pointer(t)) 3115 for i := range tt.fields { 3116 f := &tt.fields[i] 3117 addTypeBits(bv, offset+f.offset(), f.typ) 3118 } 3119 } 3120 }
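// Illustrative sketch, assuming only the exported constructors above plus
// TypeOf and New from the rest of the package: build the dynamic type
// struct{ Name string; Scores [3]float64 } and fill in a value of it.
//
//	t := StructOf([]StructField{
//		{Name: "Name", Type: TypeOf("")},
//		{Name: "Scores", Type: ArrayOf(3, TypeOf(float64(0)))},
//	})
//	v := New(t).Elem()
//	v.Field(0).SetString("gopher")
//	v.Field(1).Index(0).SetFloat(9.5)
//	// t.String() == "struct { Name string; Scores [3]float64 }"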