// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"internal/abi"
	"internal/goarch"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver,
	// and only exported methods are accessible.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Methods are sorted in lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods accessible using Method.
	//
	// For a non-interface type, it returns the number of exported methods.
	//
	// For an interface type, it returns the number of exported and unexported methods.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	// Even if ConvertibleTo returns true, the conversion may still panic.
	// For example, a slice of type []T is convertible to *[N]T,
	// but the conversion will panic if its length is less than N.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	// Even if Comparable returns true, the comparison may still panic.
	// For example, values of interface type are comparable,
	// but the comparison will panic if their dynamic type is not comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Pointer: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ...float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	common() *abi.Type
	uncommon() *uncommonType
}

// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.

/*
 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Pointer
	Slice
	String
	Struct
	UnsafePointer
)

// Ptr is the old name for the Pointer kind.
const Ptr = Pointer

// uncommonType is present only for defined types or types with methods
// (if T is a defined type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe a non-defined type with no methods.
type uncommonType = abi.UncommonType

// Embed this type to get common/uncommon
type common struct {
	abi.Type
}

// rtype is the common implementation of most values.
// It is embedded in other struct types.
type rtype struct {
	t abi.Type
}

func (t *rtype) common() *abi.Type {
	return &t.t
}

func (t *rtype) uncommon() *abi.UncommonType {
	return t.t.Uncommon()
}

type aNameOff = abi.NameOff
type aTypeOff = abi.TypeOff
type aTextOff = abi.TextOff

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir = 1 << iota // <-chan
	SendDir                     // chan<-
	BothDir = RecvDir | SendDir // chan
)

// arrayType represents a fixed array type.
type arrayType = abi.ArrayType

// chanType represents a channel type.
type chanType = abi.ChanType

// funcType represents a function type.
//
// A *rtype for each in and out parameter is stored in an array that
// directly follows the funcType (and possibly its uncommonType). So
// a function type with one method, one input, and one output is:
//
//	struct {
//		funcType
//		uncommonType
//		[2]*rtype    // [0] is in, [1] is out
//	}
type funcType = abi.FuncType

// interfaceType represents an interface type.
type interfaceType struct {
	abi.InterfaceType // can embed directly because not a public type.
}

func (t *interfaceType) nameOff(off aNameOff) abi.Name {
	return toRType(&t.Type).nameOff(off)
}

func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
	return toRType(t).nameOff(off)
}

func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
	return toRType(t).typeOff(off)
}

func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
	return toRType(&t.Type).typeOff(off)
}

func (t *interfaceType) common() *abi.Type {
	return &t.Type
}

func (t *interfaceType) uncommon() *abi.UncommonType {
	return t.Uncommon()
}

// mapType represents a map type.
type mapType struct {
	abi.MapType
}

// ptrType represents a pointer type.
type ptrType struct {
	abi.PtrType
}

// sliceType represents a slice type.
type sliceType struct {
	abi.SliceType
}

// Struct field
type structField = abi.StructField

// structType represents a struct type.
type structType struct {
	abi.StructType
}

// pkgPath extracts the package path recorded in the encoded name n,
// or returns "" if the name does not carry one (flag bit 1<<2 unset).
func pkgPath(n abi.Name) string {
	if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 {
		return ""
	}
	// Skip the varint-prefixed name data, and the tag data if present,
	// to reach the trailing package-path name offset.
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if n.HasTag() {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff int32
	// Note that this field may not be aligned in memory,
	// so we cannot use a direct int32 assignment here.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:])
	pkgPathName := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))}
	return pkgPathName.Name()
}

func newName(n, tag string, exported, embedded bool) abi.Name {
	return abi.NewName(n, tag, exported, embedded)
}

/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */

// Method represents a single method.
type Method struct {
	// Name is the method name.
	Name string

	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

// IsExported reports whether the method is exported.
func (m Method) IsExported() bool {
	return m.PkgPath == ""
}

const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindMask        = (1 << 5) - 1
)

// String returns the name of k.
func (k Kind) String() string {
	if uint(k) < uint(len(kindNames)) {
		return kindNames[uint(k)]
	}
	// Out-of-range kinds get a synthetic "kindN" name rather than panicking.
	return "kind" + strconv.Itoa(int(k))
}

var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Pointer:       "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

// resolveNameOff resolves a name offset from a base pointer.
// The (*rtype).nameOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

// resolveTypeOff resolves an *rtype offset from a base type.
// The (*rtype).typeOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// resolveTextOff resolves a function pointer offset from a base type.
// The (*rtype).textOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// addReflectOff adds a pointer to the reflection lookup map in the runtime.
// It returns a new ID that can be used as a typeOff or textOff, and will
// be resolved correctly. Implemented in the runtime package.
//
//go:noescape
func addReflectOff(ptr unsafe.Pointer) int32

// resolveReflectName adds a name to the reflection lookup map in the runtime.
// It returns a new nameOff that can be used to refer to the pointer.
func resolveReflectName(n abi.Name) aNameOff {
	return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
}

// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
// It returns a new typeOff that can be used to refer to the pointer.
func resolveReflectType(t *abi.Type) aTypeOff {
	return aTypeOff(addReflectOff(unsafe.Pointer(t)))
}

// resolveReflectText adds a function pointer to the reflection lookup map in
// the runtime. It returns a new textOff that can be used to refer to the
// pointer.
func resolveReflectText(ptr unsafe.Pointer) aTextOff {
	return aTextOff(addReflectOff(ptr))
}

func (t *rtype) nameOff(off aNameOff) abi.Name {
	return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
}

func (t *rtype) typeOff(off aTypeOff) *abi.Type {
	return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
}

func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
	return resolveTextOff(unsafe.Pointer(t), int32(off))
}

func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
	return toRType(t).textOff(off)
}

func (t *rtype) String() string {
	s := t.nameOff(t.t.Str).Name()
	if t.t.TFlag&abi.TFlagExtraStar != 0 {
		// The stored name has an extra leading '*'; drop it.
		return s[1:]
	}
	return s
}

func (t *rtype) Size() uintptr { return t.t.Size() }

func (t *rtype) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.t.Size_) * 8
}

func (t *rtype) Align() int { return t.t.Align() }

func (t *rtype) FieldAlign() int { return t.t.FieldAlign() }

func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }

func (t *rtype) exportedMethods() []abi.Method {
	ut := t.uncommon()
	if ut == nil {
		return nil
	}
	return ut.ExportedMethods()
}

func (t *rtype) NumMethod() int {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	return len(t.exportedMethods())
}

func (t *rtype) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	methods := t.exportedMethods()
	if i < 0 || i >= len(methods) {
		panic("reflect: Method index out of range")
	}
	p := methods[i]
	pname := t.nameOff(p.Name)
	m.Name = pname.Name()
	fl := flag(Func)
	mtyp := t.typeOff(p.Mtyp)
	ft := (*funcType)(unsafe.Pointer(mtyp))
	// Build the method's func type with the receiver prepended as the
	// first input parameter.
	in := make([]Type, 0, 1+ft.NumIn())
	in = append(in, t)
	for _, arg := range ft.InSlice() {
		in = append(in, toRType(arg))
	}
	out := make([]Type, 0, ft.NumOut())
	for _, ret := range ft.OutSlice() {
		out = append(out, toRType(ret))
	}
	mt := FuncOf(in, out, ft.IsVariadic())
	m.Type = mt
	tfn := t.textOff(p.Tfn)
	fn := unsafe.Pointer(&tfn)
	m.Func = Value{&mt.(*rtype).t, fn, fl}

	m.Index = i
	return m
}

func (t *rtype) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	ut := t.uncommon()
	if ut == nil {
		return Method{}, false
	}

	methods := ut.ExportedMethods()

	// Binary search over the lexicographically sorted exported methods.
	// We are looking for the first index i where the string becomes >= s.
	// This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name).
	i, j := 0, len(methods)
	for i < j {
		h := int(uint(i+j) >> 1) // avoid overflow when computing h
		// i ≤ h < j
		if !(t.nameOff(methods[h].Name).Name() >= name) {
			i = h + 1 // preserves f(i-1) == false
		} else {
			j = h // preserves f(j) == true
		}
	}
	// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.
	if i < len(methods) && name == t.nameOff(methods[i].Name).Name() {
		return t.Method(i), true
	}

	return Method{}, false
}

func (t *rtype) PkgPath() string {
	if t.t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	ut := t.uncommon()
	if ut == nil {
		return ""
	}
	return t.nameOff(ut.PkgPath).Name()
}

func pkgPathFor(t *abi.Type) string {
	return toRType(t).PkgPath()
}

func (t *rtype) Name() string {
	if !t.t.HasName() {
		return ""
	}
	s := t.String()
	i := len(s) - 1
	// Scan backwards for the last '.' that is not inside type-parameter
	// brackets (e.g. "pkg.T[otherpkg.U]").
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}

func nameFor(t *abi.Type) string {
	return toRType(t).Name()
}

func (t *rtype) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type " + t.String())
	}
	tt := (*abi.ChanType)(unsafe.Pointer(t))
	return ChanDir(tt.Dir)
}

func toRType(t *abi.Type) *rtype {
	return (*rtype)(unsafe.Pointer(t))
}

func elem(t *abi.Type) *abi.Type {
	et := t.Elem()
	if et != nil {
		return et
	}
	panic("reflect: Elem of invalid type " + stringFor(t))
}

func (t *rtype) Elem() Type {
	return toType(elem(t.common()))
}

func (t *rtype) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

func (t *rtype) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

func (t *rtype) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

func (t *rtype) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type " + t.String())
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.Key)
}

func (t *rtype) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type " + t.String())
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.Len)
}

func (t *rtype) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.Fields)
}

func (t *rtype) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return toType(tt.InSlice()[i])
}

func (t *rtype) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.NumIn()
}

func (t *rtype) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.NumOut()
}

func (t *rtype) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return toType(tt.OutSlice()[i])
}

func (t *rtype) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.IsVariadic()
}

// add returns p+x.
//
// The whySafe string is ignored, so that the function still inlines
// as efficiently as p+x, but all call sites should use the string to
// record why the addition is safe, which is to say why the addition
// does not cause x to advance to the very end of p's allocation
// and therefore point incorrectly at the next block in memory.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.Methods) {
		return
	}
	p := &t.Methods[i]
	pname := t.nameOff(p.Name)
	m.Name = pname.Name()
	if !pname.IsExported() {
		m.PkgPath = pkgPath(pname)
		if m.PkgPath == "" {
			m.PkgPath = t.PkgPath.Name()
		}
	}
	m.Type = toType(t.typeOff(p.Typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.Methods) }

// MethodByName returns the method with the given name in the type's method set
// and a boolean indicating whether the method was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *abi.Imethod
	for i := range t.Methods {
		p = &t.Methods[i]
		if t.nameOff(p.Name).Name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string

	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// IsExported reports whether the field is exported.
func (f StructField) IsExported() bool {
	return f.PkgPath == ""
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				// Skip the escaped character.
				i++
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.Fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.Fields[i]
	f.Type = toType(p.Typ)
	f.Name = p.Name.Name()
	f.Anonymous = p.Embedded()
	// PkgPath is set only for unexported fields, per the StructField contract.
	if !p.Name.IsExported() {
		f.PkgPath = t.PkgPath.Name()
	}
	if tag := p.Name.Tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.Offset

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.Type)
	for i, x := range index {
		if i > 0 {
			ft := f.Type
			// Implicitly dereference a pointer to an embedded struct,
			// matching the language's selector rules.
			if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType // struct type to scan
	index []int       // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.Fields {
				f := &t.Fields[i]
				// Find name and (for embedded field) type for field f.
				fname := f.Name.Name()
				var ntyp *abi.Type
				if f.Embedded() {
					// Embedded field of type T or *T.
					ntyp = f.Typ
					if ntyp.Kind() == abi.Pointer {
						ntyp = ntyp.Elem()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without embedded fields.
	hasEmbeds := false
	if name != "" {
		for i := range t.Fields {
			tf := &t.Fields[i]
			if tf.Name.Name() == name {
				return t.Field(i), true
			}
			if tf.Embedded() {
				hasEmbeds = true
			}
		}
	}
	if !hasEmbeds {
		return
	}
	// Fall back to the breadth-first search over embedded fields.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i any) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	// Noescape so this doesn't make i to escape. See the comment
	// at Value.typ for why this is safe.
	return toType((*abi.Type)(noescape(unsafe.Pointer(eface.typ))))
}

// rtypeOf directly extracts the *rtype of the provided value.
func rtypeOf(i any) *abi.Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return eface.typ
}

// ptrMap is the cache for PointerTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
//
// PtrTo is the old spelling of PointerTo.
// The two functions behave identically.
func PtrTo(t Type) Type { return PointerTo(t) }

// PointerTo returns the pointer type with element t.
// For example, if t represents type Foo, PointerTo(t) represents *Foo.
func PointerTo(t Type) Type {
	return toRType(t.(*rtype).ptrTo())
}

// ptrTo returns the *abi.Type describing the pointer type *t,
// building (and caching) it if the linker did not already emit one.
func (t *rtype) ptrTo() *abi.Type {
	at := &t.t
	// Fast path: the binary already contains the pointer type.
	if at.PtrToThis != 0 {
		return t.typeOff(at.PtrToThis)
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).Type
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p := (*ptrType)(unsafe.Pointer(tt))
		if p.Elem != &t.t {
			continue
		}
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).Type
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr any = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.Str = resolveReflectName(newName(s, "", false, false))
	pp.PtrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.Hash = fnv1(t.t.Hash, '*')

	pp.Elem = at

	// LoadOrStore so concurrent callers agree on a single canonical type.
	pi, _ := ptrMap.LoadOrStore(t, &pp)
	return &pi.(*ptrType).Type
}

func ptrTo(t *abi.Type) *abi.Type {
	return toRType(t).ptrTo()
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		// 16777619 is the 32-bit FNV prime.
		x = x*16777619 ^ uint32(b)
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// See the doc comment on the Type interface for the full contract.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.common(), t.common())
}

// AssignableTo reports whether a value of the type is assignable to type u.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.common()
	return directlyAssignable(uu, t.common()) || implements(uu, t.common())
}

// ConvertibleTo reports whether a value of the type is convertible to type u.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	return convertOp(u.common(), t.common()) != nil
}

// Comparable reports whether values of this type are comparable;
// a type is comparable exactly when the runtime provides an Equal function for it.
func (t *rtype) Comparable() bool {
	return t.t.Equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *abi.Type) bool {
	if T.Kind() != abi.Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.Methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == abi.Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.Methods); j++ {
			tm := &t.Methods[i]
			tmName := t.nameOff(tm.Name)
			vm := &v.Methods[j]
			vmName := nameOffFor(V, vm.Name)
			if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
				if !tmName.IsExported() {
					// Unexported methods match only within the same package.
					tmPkgPath := pkgPath(tmName)
					if tmPkgPath == "" {
						tmPkgPath = t.PkgPath.Name()
					}
					vmPkgPath := pkgPath(vmName)
					if vmPkgPath == "" {
						vmPkgPath = v.PkgPath.Name()
					}
					if tmPkgPath != vmPkgPath {
						continue
					}
				}
				if i++; i >= len(t.Methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.Uncommon()
	if v == nil {
		// V has no methods at all, so it cannot satisfy a non-empty interface.
		return false
	}
	i := 0
	vmethods := v.Methods()
	for j := 0; j < int(v.Mcount); j++ {
		tm := &t.Methods[i]
		tmName := t.nameOff(tm.Name)
		vm := vmethods[j]
		vmName := nameOffFor(V, vm.Name)
		if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
			if !tmName.IsExported() {
				// Unexported methods match only within the same package.
				tmPkgPath := pkgPath(tmName)
				if tmPkgPath == "" {
					tmPkgPath = t.PkgPath.Name()
				}
				vmPkgPath := pkgPath(vmName)
				if vmPkgPath == "" {
					vmPkgPath = nameOffFor(V, v.PkgPath).Name()
				}
				if tmPkgPath != vmPkgPath {
					continue
				}
			}
			if i++; i >= len(t.Methods) {
				return true
			}
		}
	}
	return false
}

// specialChannelAssignability reports whether a value x of channel type V
// can be directly assigned (using memmove) to another channel type T.
// https://golang.org/doc/go_spec.html#Assignability
// T and V must be both of Chan kind.
func specialChannelAssignability(T, V *abi.Type) bool {
	// Special case:
	// x is a bidirectional channel value, T is a channel type,
	// x's type V and T have identical element types,
	// and at least one of V or T is not a defined type.
	return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *abi.Type) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must not be defined
	// and they must have the same kind.
	if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
		return false
	}

	if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
		return true
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether T and V are identical types.
// When cmpTags is true, identity is pointer equality (struct tags included);
// otherwise names, kinds and package paths are compared structurally.
func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
		return false
	}

	return haveIdenticalUnderlyingType(T, V, false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, recursing through composite types.
func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := Kind(T.Kind())
	if kind != Kind(V.Kind()) {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		if t.OutCount != v.OutCount || t.InCount != v.InCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.Methods) == 0 && len(v.Methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Pointer, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.Fields) != len(v.Fields) {
			return false
		}
		if t.PkgPath.Name() != v.PkgPath.Name() {
			return false
		}
		for i := range t.Fields {
			tf := &t.Fields[i]
			vf := &v.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
				return false
			}
			if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Embedded() != vf.Embedded() {
				return false
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff resolves a typelinks offset within a module section
// to the *abi.Type it refers to.
func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
	return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*abi.Type {
	sections, offset := typelinks()
	var ret []*abi.Type

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)>>1 // avoid overflow when computing h
			// i ≤ h < j
			if !(stringFor(rtypeOff(section, offs[h])) >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if stringFor(typ) != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind      // kind of the constructed type (Chan, Map, Slice, Array)
	t1    *abi.Type // first subtype (element or key)
	t2    *abi.Type // second subtype (map element), or nil
	extra uintptr   // extra integer (chan dir, array length), or 0
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.common()

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.Size_ >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + stringFor(typ)
	case RecvDir:
		s = "<-chan " + stringFor(typ)
	case BothDir:
		typeStr := stringFor(typ)
		if typeStr[0] == '<' {
			// typ is recv chan, need parentheses as "<-" associates with leftmost
			// chan possible, see:
			// * https://golang.org/ref/spec#Channel_types
			// * https://github.com/golang/go/issues/39897
			s = "chan (" + typeStr + ")"
		} else {
			s = "chan " + typeStr
		}
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a channel type.
	var ichan any = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.TFlag = abi.TFlagRegularMemory
	ch.Dir = abi.ChanDir(dir)
	ch.Str = resolveReflectName(newName(s, "", false, false))
	ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
	ch.Elem = typ

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
	return ti.(Type)
}

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.common()
	etyp := elem.common()

	// Only comparable types (those with a runtime Equal function)
	// may be map keys.
	if ktyp.Equal == nil {
		panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.Key == ktyp && mt.Elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a map type.
	// Note: flag values must match those used in the TMAP case
	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
	var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.Str = resolveReflectName(newName(s, "", false, false))
	mt.TFlag = 0
	mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
	mt.Key = ktyp
	mt.Elem = etyp
	mt.Bucket = bucketOf(ktyp, etyp)
	mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(ktyp, p, seed)
	}
	mt.Flags = 0
	if ktyp.Size_ > maxKeySize {
		// Oversized keys are stored indirectly (as pointers) in the bucket.
		mt.KeySize = uint8(goarch.PtrSize)
		mt.Flags |= 1 // indirect key
	} else {
		mt.KeySize = uint8(ktyp.Size_)
	}
	if etyp.Size_ > maxValSize {
		// Oversized values are stored indirectly (as pointers) in the bucket.
		mt.ValueSize = uint8(goarch.PtrSize)
		mt.Flags |= 2 // indirect value
	} else {
		mt.MapType.ValueSize = uint8(etyp.Size_)
	}
	mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
	if isReflexive(ktyp) {
		mt.Flags |= 4
	}
	if needKeyUpdate(ktyp) {
		mt.Flags |= 8
	}
	if hashMightPanic(ktyp) {
		mt.Flags |= 16
	}
	mt.PtrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
	return ti.(Type)
}

// funcTypes caches, per total argument count n, the struct type used as
// backing storage for a funcType with n in+out arguments.
var funcTypes []Type
var funcTypesMutex sync.Mutex

// initFuncTypes returns (building it on first use) the backing struct
// type for a funcType with n arguments: a funcType header followed by
// an [n]*rtype array for the argument types.
func initFuncTypes(n int) Type {
	funcTypesMutex.Lock()
	defer funcTypesMutex.Unlock()
	if n >= len(funcTypes) {
		newFuncTypes := make([]Type, n+1)
		copy(newFuncTypes, funcTypes)
		funcTypes = newFuncTypes
	}
	if funcTypes[n] != nil {
		return funcTypes[n]
	}

	funcTypes[n] = StructOf([]StructField{
		{
			Name: "FuncType",
			Type: TypeOf(funcType{}),
		},
		{
			Name: "Args",
			Type: ArrayOf(n, TypeOf(&rtype{})),
		},
	})
	return funcTypes[n]
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc any = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	if n > 128 {
		panic("reflect.FuncOf: too many arguments")
	}

	// Allocate the funcType header and its trailing argument array
	// together, via the per-n backing struct from initFuncTypes.
	o := New(initFuncTypes(n)).Elem()
	ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer()))
	args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n]
	*ft = *prototype

	// Build a hash and minimally populate ft.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
	}

	ft.TFlag = 0
	ft.Hash = hash
	ft.InCount = uint16(len(in))
	ft.OutCount = uint16(len(out))
	if variadic {
		// The top bit of OutCount records variadicity.
		ft.OutCount |= 1 << 15
	}

	// Look in cache.
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*abi.Type) {
			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
				return toRType(t)
			}
		}
	}

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*abi.Type) {
			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
				return toRType(t)
			}
		}
	}

	addToCache := func(tt *abi.Type) Type {
		var rts []*abi.Type
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*abi.Type)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return toType(tt)
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
			return addToCache(tt)
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.Str = resolveReflectName(newName(str, "", false, false))
	ft.PtrToThis = 0
	return addToCache(&ft.Type)
}

// stringFor returns the string representation of t.
func stringFor(t *abi.Type) string {
	return toRType(t).String()
}

// funcStr builds a string representation of a funcType.
func funcStr(ft *funcType) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "func("...)
	for i, t := range ft.InSlice() {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		if ft.IsVariadic() && i == int(ft.InCount)-1 {
			// Render the final slice parameter as "...Elem".
			repr = append(repr, "..."...)
			repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
		} else {
			repr = append(repr, stringFor(t)...)
		}
	}
	repr = append(repr, ')')
	out := ft.OutSlice()
	// A single result is unparenthesized; multiple results are wrapped in "(...)".
	if len(out) == 1 {
		repr = append(repr, ' ')
	} else if len(out) > 1 {
		repr = append(repr, " ("...)
	}
	for i, t := range out {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		repr = append(repr, stringFor(t)...)
	}
	if len(out) > 1 {
		repr = append(repr, ')')
	}
	return string(repr)
}

// isReflexive reports whether the == operation on the type is reflexive.
// That is, x == x for all values x of type t.
func isReflexive(t *abi.Type) bool {
	switch Kind(t.Kind()) {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
		return true
	case Float32, Float64, Complex64, Complex128, Interface:
		// Floats are not reflexive (NaN != NaN), and interfaces
		// may contain floats.
		return false
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return isReflexive(tt.Elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.Fields {
			if !isReflexive(f.Typ) {
				return false
			}
		}
		return true
	default:
		// Func, Map, Slice, Invalid
		panic("isReflexive called on non-key type " + stringFor(t))
	}
}

// needKeyUpdate reports whether map overwrites require the key to be copied.
func needKeyUpdate(t *abi.Type) bool {
	switch Kind(t.Kind()) {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.Elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.Fields {
			if needKeyUpdate(f.Typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + stringFor(t))
	}
}

// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *abi.Type) bool {
	switch Kind(t.Kind()) {
	case Interface:
		// Hashing an interface hashes its dynamic value, whose
		// type may not be comparable.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return hashMightPanic(tt.Elem)
	case Struct:
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.Fields {
			if hashMightPanic(f.Typ) {
				return true
			}
		}
		return false
	default:
		return false
	}
}

// Make sure these routines stay in sync with ../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = abi.MapBucketCount
	maxKeySize uintptr = abi.MapMaxKeyBytes
	maxValSize uintptr = abi.MapMaxElemBytes
)

// bucketOf constructs the GC-only bucket type for a map[ktyp]etyp.
// See the sync note above: the layout must match ../runtime/map.go.
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
	// Oversized keys/values are stored indirectly; the bucket holds pointers.
	if ktyp.Size_ > maxKeySize {
		ktyp = ptrTo(ktyp)
	}
	if etyp.Size_ > maxValSize {
		etyp = ptrTo(etyp)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
	// or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr

	// Bucket layout: tophash array, keys, values, overflow pointer.
	size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
	if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
		panic("reflect: bad size computation in MapOf")
	}

	if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
		nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
		n := (nptr + 7) / 8

		// Runtime needs pointer masks to be a multiple of uintptr in size.
		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		mask := make([]byte, n)
		base := bucketSize / goarch.PtrSize

		if ktyp.PtrBytes != 0 {
			emitGCMask(mask, base, ktyp, bucketSize)
		}
		base += bucketSize * ktyp.Size_ / goarch.PtrSize

		if etyp.PtrBytes != 0 {
			emitGCMask(mask, base, etyp, bucketSize)
		}
		base += bucketSize * etyp.Size_ / goarch.PtrSize

		word := base
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * goarch.PtrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &abi.Type{
		Align_:   goarch.PtrSize,
		Size_:    size,
		Kind_:    uint8(Struct),
		PtrBytes: ptrdata,
		GCData:   gcdata,
	}
	s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
	b.Str = resolveReflectName(newName(s, "", false, false))
	return b
}

// gcSlice returns the [begin:end] bytes of the type's GC data.
func (t *rtype) gcSlice(begin, end uintptr) []byte {
	return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end]
}

// emitGCMask writes the GC mask for [n]typ into out, starting at bit
// offset base.
func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
	if typ.Kind_&kindGCProg != 0 {
		panic("reflect: unexpected GC program")
	}
	ptrs := typ.PtrBytes / goarch.PtrSize
	words := typ.Size_ / goarch.PtrSize
	mask := typ.GcSlice(0, (ptrs+7)/8)
	for j := uintptr(0); j < ptrs; j++ {
		if (mask[j/8]>>(j%8))&1 != 0 {
			// Replicate bit j of the element mask into each of the n copies.
			for i := uintptr(0); i < n; i++ {
				k := base + i*words + j
				out[k/8] |= 1 << (k % 8)
			}
		}
	}
}

// appendGCProg appends the GC program for the first ptrdata bytes of
// typ to dst and returns the extended slice.
func appendGCProg(dst []byte, typ *abi.Type) []byte {
	if typ.Kind_&kindGCProg != 0 {
		// Element has GC program; emit one element.
		n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
		prog := typ.GcSlice(4, 4+n-1)
		return append(dst, prog...)
	}

	// Element is small with pointer mask; use as literal bits.
	ptrs := typ.PtrBytes / goarch.PtrSize
	mask := typ.GcSlice(0, (ptrs+7)/8)

	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
	for ; ptrs > 120; ptrs -= 120 {
		dst = append(dst, 120)
		dst = append(dst, mask[:15]...)
		mask = mask[15:]
	}

	dst = append(dst, byte(ptrs))
	dst = append(dst, mask...)
	return dst
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.common()

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + stringFor(typ)
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.Elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a slice type.
	var islice any = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.TFlag = 0
	slice.Str = resolveReflectName(newName(s, "", false, false))
	slice.Hash = fnv1(typ.Hash, '[')
	slice.Elem = typ
	slice.PtrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
	return ti.(Type)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// structTypeUncommon is a structType with a trailing uncommonType,
// used when the constructed struct type carries methods.
type structTypeUncommon struct {
	structType
	u uncommonType
}

// isLetter reports whether a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		if i == 0 && !isLetter(c) {
			return false
		}

		if !(isLetter(c) || unicode.IsDigit(c)) {
			return false
		}
	}

	// The empty string is not an identifier.
	return len(fieldName) > 0
}

// StructOf returns the struct type containing fields.
// The Offset and Index fields are ignored and computed as they would be
// by the compiler.
//
// StructOf currently does not generate wrapper methods for embedded
// fields and panics if passed unexported StructFields.
// These limitations may be lifted in a future version.
func StructOf(fields []StructField) Type {
	var (
		hash       = fnv1(0, []byte("struct {")...)
		size       uintptr
		typalign   uint8
		comparable = true
		methods    []abi.Method

		fs   = make([]structField, len(fields))
		repr = make([]byte, 0, 64)
		fset = map[string]struct{}{} // fields' names

		hasGCProg = false // records whether a struct-field type has a GCProg
	)

	lastzero := uintptr(0)
	repr = append(repr, "struct {"...)
2170 pkgpath := "" 2171 for i, field := range fields { 2172 if field.Name == "" { 2173 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") 2174 } 2175 if !isValidFieldName(field.Name) { 2176 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") 2177 } 2178 if field.Type == nil { 2179 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2180 } 2181 f, fpkgpath := runtimeStructField(field) 2182 ft := f.Typ 2183 if ft.Kind_&kindGCProg != 0 { 2184 hasGCProg = true 2185 } 2186 if fpkgpath != "" { 2187 if pkgpath == "" { 2188 pkgpath = fpkgpath 2189 } else if pkgpath != fpkgpath { 2190 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) 2191 } 2192 } 2193 2194 // Update string and hash 2195 name := f.Name.Name() 2196 hash = fnv1(hash, []byte(name)...) 2197 repr = append(repr, (" " + name)...) 2198 if f.Embedded() { 2199 // Embedded field 2200 if f.Typ.Kind() == abi.Pointer { 2201 // Embedded ** and *interface{} are illegal 2202 elem := ft.Elem() 2203 if k := elem.Kind(); k == abi.Pointer || k == abi.Interface { 2204 panic("reflect.StructOf: illegal embedded field type " + stringFor(ft)) 2205 } 2206 } 2207 2208 switch Kind(f.Typ.Kind()) { 2209 case Interface: 2210 ift := (*interfaceType)(unsafe.Pointer(ft)) 2211 for im, m := range ift.Methods { 2212 if pkgPath(ift.nameOff(m.Name)) != "" { 2213 // TODO(sbinet). Issue 15924. 
2214 panic("reflect: embedded interface with unexported method(s) not implemented") 2215 } 2216 2217 var ( 2218 mtyp = ift.typeOff(m.Typ) 2219 ifield = i 2220 imethod = im 2221 ifn Value 2222 tfn Value 2223 ) 2224 2225 if ft.Kind_&kindDirectIface != 0 { 2226 tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value { 2227 var args []Value 2228 var recv = in[0] 2229 if len(in) > 1 { 2230 args = in[1:] 2231 } 2232 return recv.Field(ifield).Method(imethod).Call(args) 2233 }) 2234 ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value { 2235 var args []Value 2236 var recv = in[0] 2237 if len(in) > 1 { 2238 args = in[1:] 2239 } 2240 return recv.Field(ifield).Method(imethod).Call(args) 2241 }) 2242 } else { 2243 tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value { 2244 var args []Value 2245 var recv = in[0] 2246 if len(in) > 1 { 2247 args = in[1:] 2248 } 2249 return recv.Field(ifield).Method(imethod).Call(args) 2250 }) 2251 ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value { 2252 var args []Value 2253 var recv = Indirect(in[0]) 2254 if len(in) > 1 { 2255 args = in[1:] 2256 } 2257 return recv.Field(ifield).Method(imethod).Call(args) 2258 }) 2259 } 2260 2261 methods = append(methods, abi.Method{ 2262 Name: resolveReflectName(ift.nameOff(m.Name)), 2263 Mtyp: resolveReflectType(mtyp), 2264 Ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2265 Tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2266 }) 2267 } 2268 case Pointer: 2269 ptr := (*ptrType)(unsafe.Pointer(ft)) 2270 if unt := ptr.Uncommon(); unt != nil { 2271 if i > 0 && unt.Mcount > 0 { 2272 // Issue 15924. 2273 panic("reflect: embedded type with methods not implemented if type is not first field") 2274 } 2275 if len(fields) > 1 { 2276 panic("reflect: embedded type with methods not implemented if there is more than one field") 2277 } 2278 for _, m := range unt.Methods() { 2279 mname := nameOffFor(ft, m.Name) 2280 if pkgPath(mname) != "" { 2281 // TODO(sbinet). 2282 // Issue 15924. 
2283 panic("reflect: embedded interface with unexported method(s) not implemented") 2284 } 2285 methods = append(methods, abi.Method{ 2286 Name: resolveReflectName(mname), 2287 Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)), 2288 Ifn: resolveReflectText(textOffFor(ft, m.Ifn)), 2289 Tfn: resolveReflectText(textOffFor(ft, m.Tfn)), 2290 }) 2291 } 2292 } 2293 if unt := ptr.Elem.Uncommon(); unt != nil { 2294 for _, m := range unt.Methods() { 2295 mname := nameOffFor(ft, m.Name) 2296 if pkgPath(mname) != "" { 2297 // TODO(sbinet) 2298 // Issue 15924. 2299 panic("reflect: embedded interface with unexported method(s) not implemented") 2300 } 2301 methods = append(methods, abi.Method{ 2302 Name: resolveReflectName(mname), 2303 Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)), 2304 Ifn: resolveReflectText(textOffFor(ptr.Elem, m.Ifn)), 2305 Tfn: resolveReflectText(textOffFor(ptr.Elem, m.Tfn)), 2306 }) 2307 } 2308 } 2309 default: 2310 if unt := ft.Uncommon(); unt != nil { 2311 if i > 0 && unt.Mcount > 0 { 2312 // Issue 15924. 2313 panic("reflect: embedded type with methods not implemented if type is not first field") 2314 } 2315 if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 { 2316 panic("reflect: embedded type with methods not implemented for non-pointer type") 2317 } 2318 for _, m := range unt.Methods() { 2319 mname := nameOffFor(ft, m.Name) 2320 if pkgPath(mname) != "" { 2321 // TODO(sbinet) 2322 // Issue 15924. 
2323 panic("reflect: embedded interface with unexported method(s) not implemented") 2324 } 2325 methods = append(methods, abi.Method{ 2326 Name: resolveReflectName(mname), 2327 Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)), 2328 Ifn: resolveReflectText(textOffFor(ft, m.Ifn)), 2329 Tfn: resolveReflectText(textOffFor(ft, m.Tfn)), 2330 }) 2331 2332 } 2333 } 2334 } 2335 } 2336 if _, dup := fset[name]; dup && name != "_" { 2337 panic("reflect.StructOf: duplicate field " + name) 2338 } 2339 fset[name] = struct{}{} 2340 2341 hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash)) 2342 2343 repr = append(repr, (" " + stringFor(ft))...) 2344 if f.Name.HasTag() { 2345 hash = fnv1(hash, []byte(f.Name.Tag())...) 2346 repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...) 2347 } 2348 if i < len(fields)-1 { 2349 repr = append(repr, ';') 2350 } 2351 2352 comparable = comparable && (ft.Equal != nil) 2353 2354 offset := align(size, uintptr(ft.Align_)) 2355 if offset < size { 2356 panic("reflect.StructOf: struct size would exceed virtual address space") 2357 } 2358 if ft.Align_ > typalign { 2359 typalign = ft.Align_ 2360 } 2361 size = offset + ft.Size_ 2362 if size < offset { 2363 panic("reflect.StructOf: struct size would exceed virtual address space") 2364 } 2365 f.Offset = offset 2366 2367 if ft.Size_ == 0 { 2368 lastzero = size 2369 } 2370 2371 fs[i] = f 2372 } 2373 2374 if size > 0 && lastzero == size { 2375 // This is a non-zero sized struct that ends in a 2376 // zero-sized field. We add an extra byte of padding, 2377 // to ensure that taking the address of the final 2378 // zero-sized field can't manufacture a pointer to the 2379 // next object in the heap. See issue 9401. 
2380 size++ 2381 if size == 0 { 2382 panic("reflect.StructOf: struct size would exceed virtual address space") 2383 } 2384 } 2385 2386 var typ *structType 2387 var ut *uncommonType 2388 2389 if len(methods) == 0 { 2390 t := new(structTypeUncommon) 2391 typ = &t.structType 2392 ut = &t.u 2393 } else { 2394 // A *rtype representing a struct is followed directly in memory by an 2395 // array of method objects representing the methods attached to the 2396 // struct. To get the same layout for a run time generated type, we 2397 // need an array directly following the uncommonType memory. 2398 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2399 tt := New(StructOf([]StructField{ 2400 {Name: "S", Type: TypeOf(structType{})}, 2401 {Name: "U", Type: TypeOf(uncommonType{})}, 2402 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, 2403 })) 2404 2405 typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer()) 2406 ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer()) 2407 2408 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]abi.Method), methods) 2409 } 2410 // TODO(sbinet): Once we allow embedding multiple types, 2411 // methods will need to be sorted like the compiler does. 2412 // TODO(sbinet): Once we allow non-exported methods, we will 2413 // need to compute xcount as the number of exported methods. 2414 ut.Mcount = uint16(len(methods)) 2415 ut.Xcount = ut.Mcount 2416 ut.Moff = uint32(unsafe.Sizeof(uncommonType{})) 2417 2418 if len(fs) > 0 { 2419 repr = append(repr, ' ') 2420 } 2421 repr = append(repr, '}') 2422 hash = fnv1(hash, '}') 2423 str := string(repr) 2424 2425 // Round the size up to be a multiple of the alignment. 2426 s := align(size, uintptr(typalign)) 2427 if s < size { 2428 panic("reflect.StructOf: struct size would exceed virtual address space") 2429 } 2430 size = s 2431 2432 // Make the struct type. 
2433 var istruct any = struct{}{} 2434 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2435 *typ = *prototype 2436 typ.Fields = fs 2437 if pkgpath != "" { 2438 typ.PkgPath = newName(pkgpath, "", false, false) 2439 } 2440 2441 // Look in cache. 2442 if ts, ok := structLookupCache.m.Load(hash); ok { 2443 for _, st := range ts.([]Type) { 2444 t := st.common() 2445 if haveIdenticalUnderlyingType(&typ.Type, t, true) { 2446 return toType(t) 2447 } 2448 } 2449 } 2450 2451 // Not in cache, lock and retry. 2452 structLookupCache.Lock() 2453 defer structLookupCache.Unlock() 2454 if ts, ok := structLookupCache.m.Load(hash); ok { 2455 for _, st := range ts.([]Type) { 2456 t := st.common() 2457 if haveIdenticalUnderlyingType(&typ.Type, t, true) { 2458 return toType(t) 2459 } 2460 } 2461 } 2462 2463 addToCache := func(t Type) Type { 2464 var ts []Type 2465 if ti, ok := structLookupCache.m.Load(hash); ok { 2466 ts = ti.([]Type) 2467 } 2468 structLookupCache.m.Store(hash, append(ts, t)) 2469 return t 2470 } 2471 2472 // Look in known types. 2473 for _, t := range typesByString(str) { 2474 if haveIdenticalUnderlyingType(&typ.Type, t, true) { 2475 // even if 't' wasn't a structType with methods, we should be ok 2476 // as the 'u uncommonType' field won't be accessed except when 2477 // tflag&abi.TFlagUncommon is set. 
2478 return addToCache(toType(t)) 2479 } 2480 } 2481 2482 typ.Str = resolveReflectName(newName(str, "", false, false)) 2483 typ.TFlag = 0 // TODO: set tflagRegularMemory 2484 typ.Hash = hash 2485 typ.Size_ = size 2486 typ.PtrBytes = typeptrdata(&typ.Type) 2487 typ.Align_ = typalign 2488 typ.FieldAlign_ = typalign 2489 typ.PtrToThis = 0 2490 if len(methods) > 0 { 2491 typ.TFlag |= abi.TFlagUncommon 2492 } 2493 2494 if hasGCProg { 2495 lastPtrField := 0 2496 for i, ft := range fs { 2497 if ft.Typ.Pointers() { 2498 lastPtrField = i 2499 } 2500 } 2501 prog := []byte{0, 0, 0, 0} // will be length of prog 2502 var off uintptr 2503 for i, ft := range fs { 2504 if i > lastPtrField { 2505 // gcprog should not include anything for any field after 2506 // the last field that contains pointer data 2507 break 2508 } 2509 if !ft.Typ.Pointers() { 2510 // Ignore pointerless fields. 2511 continue 2512 } 2513 // Pad to start of this field with zeros. 2514 if ft.Offset > off { 2515 n := (ft.Offset - off) / goarch.PtrSize 2516 prog = append(prog, 0x01, 0x00) // emit a 0 bit 2517 if n > 1 { 2518 prog = append(prog, 0x81) // repeat previous bit 2519 prog = appendVarint(prog, n-1) // n-1 times 2520 } 2521 off = ft.Offset 2522 } 2523 2524 prog = appendGCProg(prog, ft.Typ) 2525 off += ft.Typ.PtrBytes 2526 } 2527 prog = append(prog, 0) 2528 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2529 typ.Kind_ |= kindGCProg 2530 typ.GCData = &prog[0] 2531 } else { 2532 typ.Kind_ &^= kindGCProg 2533 bv := new(bitVector) 2534 addTypeBits(bv, 0, &typ.Type) 2535 if len(bv.data) > 0 { 2536 typ.GCData = &bv.data[0] 2537 } 2538 } 2539 typ.Equal = nil 2540 if comparable { 2541 typ.Equal = func(p, q unsafe.Pointer) bool { 2542 for _, ft := range typ.Fields { 2543 pi := add(p, ft.Offset, "&x.field safe") 2544 qi := add(q, ft.Offset, "&x.field safe") 2545 if !ft.Typ.Equal(pi, qi) { 2546 return false 2547 } 2548 } 2549 return true 2550 } 2551 } 2552 2553 switch { 2554 case len(fs) == 1 && 
!ifaceIndir(fs[0].Typ): 2555 // structs of 1 direct iface type can be direct 2556 typ.Kind_ |= kindDirectIface 2557 default: 2558 typ.Kind_ &^= kindDirectIface 2559 } 2560 2561 return addToCache(toType(&typ.Type)) 2562 } 2563 2564 // runtimeStructField takes a StructField value passed to StructOf and 2565 // returns both the corresponding internal representation, of type 2566 // structField, and the pkgpath value to use for this field. 2567 func runtimeStructField(field StructField) (structField, string) { 2568 if field.Anonymous && field.PkgPath != "" { 2569 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") 2570 } 2571 2572 if field.IsExported() { 2573 // Best-effort check for misuse. 2574 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. 2575 c := field.Name[0] 2576 if 'a' <= c && c <= 'z' || c == '_' { 2577 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") 2578 } 2579 } 2580 2581 resolveReflectType(field.Type.common()) // install in runtime 2582 f := structField{ 2583 Name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous), 2584 Typ: field.Type.common(), 2585 Offset: 0, 2586 } 2587 return f, field.PkgPath 2588 } 2589 2590 // typeptrdata returns the length in bytes of the prefix of t 2591 // containing pointer data. Anything after this offset is scalar data. 2592 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go 2593 func typeptrdata(t *abi.Type) uintptr { 2594 switch t.Kind() { 2595 case abi.Struct: 2596 st := (*structType)(unsafe.Pointer(t)) 2597 // find the last field that has pointers. 
2598 field := -1 2599 for i := range st.Fields { 2600 ft := st.Fields[i].Typ 2601 if ft.Pointers() { 2602 field = i 2603 } 2604 } 2605 if field == -1 { 2606 return 0 2607 } 2608 f := st.Fields[field] 2609 return f.Offset + f.Typ.PtrBytes 2610 2611 default: 2612 panic("reflect.typeptrdata: unexpected type, " + stringFor(t)) 2613 } 2614 } 2615 2616 // See cmd/compile/internal/reflectdata/reflect.go for derivation of constant. 2617 const maxPtrmaskBytes = 2048 2618 2619 // ArrayOf returns the array type with the given length and element type. 2620 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2621 // 2622 // If the resulting type would be larger than the available address space, 2623 // ArrayOf panics. 2624 func ArrayOf(length int, elem Type) Type { 2625 if length < 0 { 2626 panic("reflect: negative length passed to ArrayOf") 2627 } 2628 2629 typ := elem.common() 2630 2631 // Look in cache. 2632 ckey := cacheKey{Array, typ, nil, uintptr(length)} 2633 if array, ok := lookupCache.Load(ckey); ok { 2634 return array.(Type) 2635 } 2636 2637 // Look in known types. 2638 s := "[" + strconv.Itoa(length) + "]" + stringFor(typ) 2639 for _, tt := range typesByString(s) { 2640 array := (*arrayType)(unsafe.Pointer(tt)) 2641 if array.Elem == typ { 2642 ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) 2643 return ti.(Type) 2644 } 2645 } 2646 2647 // Make an array type. 
2648 var iarray any = [1]unsafe.Pointer{} 2649 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2650 array := *prototype 2651 array.TFlag = typ.TFlag & abi.TFlagRegularMemory 2652 array.Str = resolveReflectName(newName(s, "", false, false)) 2653 array.Hash = fnv1(typ.Hash, '[') 2654 for n := uint32(length); n > 0; n >>= 8 { 2655 array.Hash = fnv1(array.Hash, byte(n)) 2656 } 2657 array.Hash = fnv1(array.Hash, ']') 2658 array.Elem = typ 2659 array.PtrToThis = 0 2660 if typ.Size_ > 0 { 2661 max := ^uintptr(0) / typ.Size_ 2662 if uintptr(length) > max { 2663 panic("reflect.ArrayOf: array size would exceed virtual address space") 2664 } 2665 } 2666 array.Size_ = typ.Size_ * uintptr(length) 2667 if length > 0 && typ.PtrBytes != 0 { 2668 array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes 2669 } 2670 array.Align_ = typ.Align_ 2671 array.FieldAlign_ = typ.FieldAlign_ 2672 array.Len = uintptr(length) 2673 array.Slice = &(SliceOf(elem).(*rtype).t) 2674 2675 switch { 2676 case typ.PtrBytes == 0 || array.Size_ == 0: 2677 // No pointers. 2678 array.GCData = nil 2679 array.PtrBytes = 0 2680 2681 case length == 1: 2682 // In memory, 1-element array looks just like the element. 2683 array.Kind_ |= typ.Kind_ & kindGCProg 2684 array.GCData = typ.GCData 2685 array.PtrBytes = typ.PtrBytes 2686 2687 case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize: 2688 // Element is small with pointer mask; array is still small. 2689 // Create direct pointer mask by turning each 1 bit in elem 2690 // into length 1 bits in larger mask. 2691 n := (array.PtrBytes/goarch.PtrSize + 7) / 8 2692 // Runtime needs pointer masks to be a multiple of uintptr in size. 2693 n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) 2694 mask := make([]byte, n) 2695 emitGCMask(mask, 0, typ, array.Len) 2696 array.GCData = &mask[0] 2697 2698 default: 2699 // Create program that emits one element 2700 // and then repeats to make the array. 
2701 prog := []byte{0, 0, 0, 0} // will be length of prog 2702 prog = appendGCProg(prog, typ) 2703 // Pad from ptrdata to size. 2704 elemPtrs := typ.PtrBytes / goarch.PtrSize 2705 elemWords := typ.Size_ / goarch.PtrSize 2706 if elemPtrs < elemWords { 2707 // Emit literal 0 bit, then repeat as needed. 2708 prog = append(prog, 0x01, 0x00) 2709 if elemPtrs+1 < elemWords { 2710 prog = append(prog, 0x81) 2711 prog = appendVarint(prog, elemWords-elemPtrs-1) 2712 } 2713 } 2714 // Repeat length-1 times. 2715 if elemWords < 0x80 { 2716 prog = append(prog, byte(elemWords|0x80)) 2717 } else { 2718 prog = append(prog, 0x80) 2719 prog = appendVarint(prog, elemWords) 2720 } 2721 prog = appendVarint(prog, uintptr(length)-1) 2722 prog = append(prog, 0) 2723 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2724 array.Kind_ |= kindGCProg 2725 array.GCData = &prog[0] 2726 array.PtrBytes = array.Size_ // overestimate but ok; must match program 2727 } 2728 2729 etyp := typ 2730 esize := etyp.Size() 2731 2732 array.Equal = nil 2733 if eequal := etyp.Equal; eequal != nil { 2734 array.Equal = func(p, q unsafe.Pointer) bool { 2735 for i := 0; i < length; i++ { 2736 pi := arrayAt(p, i, esize, "i < length") 2737 qi := arrayAt(q, i, esize, "i < length") 2738 if !eequal(pi, qi) { 2739 return false 2740 } 2741 2742 } 2743 return true 2744 } 2745 } 2746 2747 switch { 2748 case length == 1 && !ifaceIndir(typ): 2749 // array of 1 direct iface type can be direct 2750 array.Kind_ |= kindDirectIface 2751 default: 2752 array.Kind_ &^= kindDirectIface 2753 } 2754 2755 ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type)) 2756 return ti.(Type) 2757 } 2758 2759 func appendVarint(x []byte, v uintptr) []byte { 2760 for ; v >= 0x80; v >>= 7 { 2761 x = append(x, byte(v|0x80)) 2762 } 2763 x = append(x, byte(v)) 2764 return x 2765 } 2766 2767 // toType converts from a *rtype to a Type that can be returned 2768 // to the client of package reflect. 
In gc, the only concern is that 2769 // a nil *rtype must be replaced by a nil Type, but in gccgo this 2770 // function takes care of ensuring that multiple *rtype for the same 2771 // type are coalesced into a single Type. 2772 func toType(t *abi.Type) Type { 2773 if t == nil { 2774 return nil 2775 } 2776 return toRType(t) 2777 } 2778 2779 type layoutKey struct { 2780 ftyp *funcType // function signature 2781 rcvr *abi.Type // receiver type, or nil if none 2782 } 2783 2784 type layoutType struct { 2785 t *abi.Type 2786 framePool *sync.Pool 2787 abid abiDesc 2788 } 2789 2790 var layoutCache sync.Map // map[layoutKey]layoutType 2791 2792 // funcLayout computes a struct type representing the layout of the 2793 // stack-assigned function arguments and return values for the function 2794 // type t. 2795 // If rcvr != nil, rcvr specifies the type of the receiver. 2796 // The returned type exists only for GC, so we only fill out GC relevant info. 2797 // Currently, that's just size and the GC program. We also fill in 2798 // the name for possible debugging use. 2799 func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) { 2800 if t.Kind() != abi.Func { 2801 panic("reflect: funcLayout of non-func type " + stringFor(&t.Type)) 2802 } 2803 if rcvr != nil && rcvr.Kind() == abi.Interface { 2804 panic("reflect: funcLayout with interface receiver " + stringFor(rcvr)) 2805 } 2806 k := layoutKey{t, rcvr} 2807 if lti, ok := layoutCache.Load(k); ok { 2808 lt := lti.(layoutType) 2809 return lt.t, lt.framePool, lt.abid 2810 } 2811 2812 // Compute the ABI layout. 2813 abid = newAbiDesc(t, rcvr) 2814 2815 // build dummy rtype holding gc program 2816 x := &abi.Type{ 2817 Align_: goarch.PtrSize, 2818 // Don't add spill space here; it's only necessary in 2819 // reflectcall's frame, not in the allocated frame. 2820 // TODO(mknyszek): Remove this comment when register 2821 // spill space in the frame is no longer required. 
2822 Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize), 2823 PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize, 2824 } 2825 if abid.stackPtrs.n > 0 { 2826 x.GCData = &abid.stackPtrs.data[0] 2827 } 2828 2829 var s string 2830 if rcvr != nil { 2831 s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")" 2832 } else { 2833 s = "funcargs(" + stringFor(&t.Type) + ")" 2834 } 2835 x.Str = resolveReflectName(newName(s, "", false, false)) 2836 2837 // cache result for future callers 2838 framePool = &sync.Pool{New: func() any { 2839 return unsafe_New(x) 2840 }} 2841 lti, _ := layoutCache.LoadOrStore(k, layoutType{ 2842 t: x, 2843 framePool: framePool, 2844 abid: abid, 2845 }) 2846 lt := lti.(layoutType) 2847 return lt.t, lt.framePool, lt.abid 2848 } 2849 2850 // ifaceIndir reports whether t is stored indirectly in an interface value. 2851 func ifaceIndir(t *abi.Type) bool { 2852 return t.Kind_&kindDirectIface == 0 2853 } 2854 2855 // Note: this type must agree with runtime.bitvector. 2856 type bitVector struct { 2857 n uint32 // number of bits 2858 data []byte 2859 } 2860 2861 // append a bit to the bitmap. 2862 func (bv *bitVector) append(bit uint8) { 2863 if bv.n%(8*goarch.PtrSize) == 0 { 2864 // Runtime needs pointer masks to be a multiple of uintptr in size. 2865 // Since reflect passes bv.data directly to the runtime as a pointer mask, 2866 // we append a full uintptr of zeros at a time. 
2867 for i := 0; i < goarch.PtrSize; i++ { 2868 bv.data = append(bv.data, 0) 2869 } 2870 } 2871 bv.data[bv.n/8] |= bit << (bv.n % 8) 2872 bv.n++ 2873 } 2874 2875 func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { 2876 if t.PtrBytes == 0 { 2877 return 2878 } 2879 2880 switch Kind(t.Kind_ & kindMask) { 2881 case Chan, Func, Map, Pointer, Slice, String, UnsafePointer: 2882 // 1 pointer at start of representation 2883 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { 2884 bv.append(0) 2885 } 2886 bv.append(1) 2887 2888 case Interface: 2889 // 2 pointers 2890 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { 2891 bv.append(0) 2892 } 2893 bv.append(1) 2894 bv.append(1) 2895 2896 case Array: 2897 // repeat inner type 2898 tt := (*arrayType)(unsafe.Pointer(t)) 2899 for i := 0; i < int(tt.Len); i++ { 2900 addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem) 2901 } 2902 2903 case Struct: 2904 // apply fields 2905 tt := (*structType)(unsafe.Pointer(t)) 2906 for i := range tt.Fields { 2907 f := &tt.Fields[i] 2908 addTypeBits(bv, offset+f.Offset, f.Typ) 2909 } 2910 } 2911 }