// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"internal/abi"
	"internal/goarch"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver,
	// and only exported methods are accessible.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Methods are sorted in lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods accessible using Method.
	//
	// For a non-interface type, it returns the number of exported methods.
	//
	// For an interface type, it returns the number of exported and unexported methods.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	// Even if ConvertibleTo returns true, the conversion may still panic.
	// For example, a slice of type []T is convertible to *[N]T,
	// but the conversion will panic if its length is less than N.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	// Even if Comparable returns true, the comparison may still panic.
	// For example, values of interface type are comparable,
	// but the comparison will panic if their dynamic type is not comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Pointer: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ...float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	// If the returned field is promoted from an embedded struct,
	// then Offset in the returned StructField is the offset in
	// the embedded struct.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	//
	// If the returned field is promoted from an embedded struct,
	// then Offset in the returned StructField is the offset in
	// the embedded struct.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	// OverflowComplex reports whether the complex128 x cannot be represented by type t.
	// It panics if t's Kind is not Complex64 or Complex128.
	OverflowComplex(x complex128) bool

	// OverflowFloat reports whether the float64 x cannot be represented by type t.
	// It panics if t's Kind is not Float32 or Float64.
	OverflowFloat(x float64) bool

	// OverflowInt reports whether the int64 x cannot be represented by type t.
	// It panics if t's Kind is not Int, Int8, Int16, Int32, or Int64.
	OverflowInt(x int64) bool

	// OverflowUint reports whether the uint64 x cannot be represented by type t.
	// It panics if t's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
	OverflowUint(x uint64) bool

	// Unexported accessors used by the implementation; keeping them in the
	// interface prevents types outside this package from satisfying Type.
	common() *abi.Type
	uncommon() *uncommonType
}

// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.

/*
 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */

// A Kind represents the specific kind of type that a [Type] represents.
// The zero Kind is not a valid kind.
type Kind uint

// The ordering of these constants is significant: it must match the kind
// numbering shared with the compiler and runtime, so do not reorder.
const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Pointer
	Slice
	String
	Struct
	UnsafePointer
)

// Ptr is the old name for the [Pointer] kind.
const Ptr = Pointer

// uncommonType is present only for defined types or types with methods
// (if T is a defined type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe a non-defined type with no methods.
type uncommonType = abi.UncommonType

// Embed this type to get common/uncommon
type common struct {
	abi.Type
}

// rtype is the common implementation of most values.
// It is embedded in other struct types.
type rtype struct {
	t abi.Type
}

// common returns the shared runtime type descriptor for t.
func (t *rtype) common() *abi.Type {
	return &t.t
}

// uncommon returns t's uncommonType, or nil if t has none.
func (t *rtype) uncommon() *abi.UncommonType {
	return t.t.Uncommon()
}

// Offset aliases into the abi package; kept short for local readability.
type aNameOff = abi.NameOff
type aTypeOff = abi.TypeOff
type aTextOff = abi.TextOff

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir = 1 << iota         // <-chan
	SendDir                             // chan<-
	BothDir = RecvDir | SendDir         // chan
)

// arrayType represents a fixed array type.
type arrayType = abi.ArrayType

// chanType represents a channel type.
type chanType = abi.ChanType

// funcType represents a function type.
//
// A *rtype for each in and out parameter is stored in an array that
// directly follows the funcType (and possibly its uncommonType). So
// a function type with one method, one input, and one output is:
//
//	struct {
//		funcType
//		uncommonType
//		[2]*rtype // [0] is in, [1] is out
//	}
type funcType = abi.FuncType

// interfaceType represents an interface type.
type interfaceType struct {
	abi.InterfaceType // can embed directly because not a public type.
}

// nameOff resolves the name at offset off relative to t's descriptor.
func (t *interfaceType) nameOff(off aNameOff) abi.Name {
	return toRType(&t.Type).nameOff(off)
}

// nameOffFor resolves a name offset relative to an arbitrary *abi.Type.
func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
	return toRType(t).nameOff(off)
}

// typeOffFor resolves a type offset relative to an arbitrary *abi.Type.
func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
	return toRType(t).typeOff(off)
}

// typeOff resolves the type at offset off relative to t's descriptor.
func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
	return toRType(&t.Type).typeOff(off)
}

func (t *interfaceType) common() *abi.Type {
	return &t.Type
}

func (t *interfaceType) uncommon() *abi.UncommonType {
	return t.Uncommon()
}

// mapType represents a map type.
type mapType struct {
	abi.MapType
}

// ptrType represents a pointer type.
type ptrType struct {
	abi.PtrType
}

// sliceType represents a slice type.
type sliceType struct {
	abi.SliceType
}

// Struct field
type structField = abi.StructField

// structType represents a struct type.
type structType struct {
	abi.StructType
}

// pkgPath extracts the package path stored in an encoded name, or "" if the
// name carries no package path. The layout it decodes: byte 0 holds flag
// bits (bit 1<<2 set means "has package path"), followed by a varint-length
// name, an optional varint-length tag, then a 4-byte name offset.
func pkgPath(n abi.Name) string {
	if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 {
		return ""
	}
	// Skip the varint-prefixed name, and the varint-prefixed tag if present.
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if n.HasTag() {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff int32
	// Note that this field may not be aligned in memory,
	// so we cannot use a direct int32 assignment here.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:])
	pkgPathName := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))}
	return pkgPathName.Name()
}

// newName is a thin wrapper over abi.NewName for constructing encoded names.
func newName(n, tag string, exported, embedded bool) abi.Name {
	return abi.NewName(n, tag, exported, embedded)
}

/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */

// Method represents a single method.
type Method struct {
	// Name is the method name.
	Name string

	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

// IsExported reports whether the method is exported.
func (m Method) IsExported() bool {
	// An empty PkgPath means the name needs no package qualifier,
	// i.e. the method is exported.
	return m.PkgPath == ""
}

// Flag bits shared with the runtime's kind byte; kindMask isolates the
// Kind value itself from the flag bits above it.
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindMask        = (1 << 5) - 1
)

// String returns the name of k.
func (k Kind) String() string {
	if uint(k) < uint(len(kindNames)) {
		return kindNames[uint(k)]
	}
	// Out-of-range kinds get a synthetic "kindN" name rather than panicking.
	return "kind" + strconv.Itoa(int(k))
}

// kindNames is indexed by Kind; keep in sync with the Kind constants.
var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Pointer:       "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

// resolveNameOff resolves a name offset from a base pointer.
// The (*rtype).nameOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

// resolveTypeOff resolves an *rtype offset from a base type.
// The (*rtype).typeOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// resolveTextOff resolves a function pointer offset from a base type.
// The (*rtype).textOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
//
//go:noescape
func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

// addReflectOff adds a pointer to the reflection lookup map in the runtime.
// It returns a new ID that can be used as a typeOff or textOff, and will
// be resolved correctly. Implemented in the runtime package.
//
//go:noescape
func addReflectOff(ptr unsafe.Pointer) int32

// resolveReflectName adds a name to the reflection lookup map in the runtime.
// It returns a new nameOff that can be used to refer to the pointer.
func resolveReflectName(n abi.Name) aNameOff {
	return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
}

// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
// It returns a new typeOff that can be used to refer to the pointer.
func resolveReflectType(t *abi.Type) aTypeOff {
	return aTypeOff(addReflectOff(unsafe.Pointer(t)))
}

// resolveReflectText adds a function pointer to the reflection lookup map in
// the runtime. It returns a new textOff that can be used to refer to the
// pointer.
func resolveReflectText(ptr unsafe.Pointer) aTextOff {
	return aTextOff(addReflectOff(ptr))
}

// nameOff resolves the name at offset off relative to t.
func (t *rtype) nameOff(off aNameOff) abi.Name {
	return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
}

// typeOff resolves the type at offset off relative to t.
func (t *rtype) typeOff(off aTypeOff) *abi.Type {
	return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
}

// textOff resolves the function pointer at offset off relative to t.
func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
	return resolveTextOff(unsafe.Pointer(t), int32(off))
}

func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
	return toRType(t).textOff(off)
}

func (t *rtype) String() string {
	s := t.nameOff(t.t.Str).Name()
	if t.t.TFlag&abi.TFlagExtraStar != 0 {
		// The stored name has an extra leading "*"; strip it.
		return s[1:]
	}
	return s
}

func (t *rtype) Size() uintptr { return t.t.Size() }

func (t *rtype) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	// Valid only for the numeric kinds, which are contiguous in the Kind
	// enumeration from Int through Complex128.
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.t.Size_) * 8
}

func (t *rtype) Align() int { return t.t.Align() }

func (t *rtype) FieldAlign() int { return t.t.FieldAlign() }

func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }

// exportedMethods returns the exported methods of t, or nil if t has no
// uncommonType (and hence no methods).
func (t *rtype) exportedMethods() []abi.Method {
	ut := t.uncommon()
	if ut == nil {
		return nil
	}
	return ut.ExportedMethods()
}

func (t *rtype) NumMethod() int {
	if t.Kind() == Interface {
		// Interfaces count exported and unexported methods.
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	return len(t.exportedMethods())
}

func (t *rtype) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	methods := t.exportedMethods()
	if i < 0 || i >= len(methods) {
		panic("reflect: Method index out of range")
	}
	p := methods[i]
	pname := t.nameOff(p.Name)
	m.Name = pname.Name()
	fl := flag(Func)
	mtyp := t.typeOff(p.Mtyp)
	ft := (*funcType)(unsafe.Pointer(mtyp))
	// Build the method's func type with the receiver prepended as the
	// first input parameter.
	in := make([]Type, 0, 1+ft.NumIn())
	in = append(in, t)
	for _, arg := range ft.InSlice() {
		in = append(in, toRType(arg))
	}
	out := make([]Type, 0, ft.NumOut())
	for _, ret := range ft.OutSlice() {
		out = append(out, toRType(ret))
	}
	mt := FuncOf(in, out, ft.IsVariadic())
	m.Type = mt
	tfn := t.textOff(p.Tfn)
	fn := unsafe.Pointer(&tfn)
	m.Func = Value{&mt.(*rtype).t, fn, fl}

	m.Index = i
	return m
}

func (t *rtype) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	ut := t.uncommon()
	if ut == nil {
		return Method{}, false
	}

	methods := ut.ExportedMethods()

	// We are looking for the first index i where the string becomes >= s.
	// This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name).
	i, j := 0, len(methods)
	for i < j {
		h := int(uint(i+j) >> 1) // avoid overflow when computing h
		// i ≤ h < j
		if !(t.nameOff(methods[h].Name).Name() >= name) {
			i = h + 1 // preserves f(i-1) == false
		} else {
			j = h // preserves f(j) == true
		}
	}
	// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
	if i < len(methods) && name == t.nameOff(methods[i].Name).Name() {
		return t.Method(i), true
	}

	return Method{}, false
}

func (t *rtype) PkgPath() string {
	if t.t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	ut := t.uncommon()
	if ut == nil {
		return ""
	}
	return t.nameOff(ut.PkgPath).Name()
}

func pkgPathFor(t *abi.Type) string {
	return toRType(t).PkgPath()
}

func (t *rtype) Name() string {
	if !t.t.HasName() {
		return ""
	}
	s := t.String()
	// Scan backwards for the last '.' that is not inside square brackets
	// (which would be part of a generic type's argument list).
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}

func nameFor(t *abi.Type) string {
	return toRType(t).Name()
}

func (t *rtype) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type " + t.String())
	}
	tt := (*abi.ChanType)(unsafe.Pointer(t))
	return ChanDir(tt.Dir)
}

// toRType reinterprets an *abi.Type as the package-local *rtype wrapper.
func toRType(t *abi.Type) *rtype {
	return (*rtype)(unsafe.Pointer(t))
}

// elem returns t's element type, panicking for kinds that have none.
func elem(t *abi.Type) *abi.Type {
	et := t.Elem()
	if et != nil {
		return et
	}
	panic("reflect: Elem of invalid type " + stringFor(t))
}

func (t *rtype) Elem() Type {
	return toType(elem(t.common()))
}

func (t *rtype) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

func (t *rtype) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

func (t *rtype) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

func (t *rtype) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type " + t.String())
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.Key)
}

func (t *rtype) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type " + t.String())
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.Len)
}

func (t *rtype) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type " + t.String())
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.Fields)
}

func (t *rtype) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return toType(tt.InSlice()[i])
}

func (t *rtype) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.NumIn()
}

func (t *rtype) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.NumOut()
}

func (t *rtype) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return toType(tt.OutSlice()[i])
}

func (t *rtype) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type " + t.String())
	}
	tt := (*abi.FuncType)(unsafe.Pointer(t))
	return tt.IsVariadic()
}

func (t *rtype) OverflowComplex(x complex128) bool {
	k := t.Kind()
	switch k {
	case Complex64:
		// complex64 overflows iff either component overflows float32.
		return overflowFloat32(real(x)) || overflowFloat32(imag(x))
	case Complex128:
		return false
	}
	panic("reflect: OverflowComplex of non-complex type " + t.String())
}

func (t *rtype) OverflowFloat(x float64) bool {
	k := t.Kind()
	switch k {
	case Float32:
		return overflowFloat32(x)
	case Float64:
		return false
	}
	panic("reflect: OverflowFloat of non-float type " + t.String())
}

func (t *rtype) OverflowInt(x int64) bool {
	k := t.Kind()
	switch k {
	case Int, Int8, Int16, Int32, Int64:
		// Sign-extending truncation: x fits iff truncating to bitSize
		// and re-extending gives x back.
		bitSize := t.Size() * 8
		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
		return x != trunc
	}
	panic("reflect: OverflowInt of non-int type " + t.String())
}

func (t *rtype) OverflowUint(x uint64) bool {
	k := t.Kind()
	switch k {
	case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
		// Zero-extending truncation, analogous to OverflowInt.
		bitSize := t.Size() * 8
		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
		return x != trunc
	}
	panic("reflect: OverflowUint of non-uint type " + t.String())
}

// add returns p+x.
//
// The whySafe string is ignored, so that the function still inlines
// as efficiently as p+x, but all call sites should use the string to
// record why the addition is safe, which is to say why the addition
// does not cause x to advance to the very end of p's allocation
// and therefore point incorrectly at the next block in memory.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

// String returns the channel-direction notation for d ("<-chan", "chan<-",
// or "chan"), or a synthetic "ChanDirN" for an unknown direction.
func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.Methods) {
		// Out-of-range index yields the zero Method rather than panicking.
		return
	}
	p := &t.Methods[i]
	pname := t.nameOff(p.Name)
	m.Name = pname.Name()
	if !pname.IsExported() {
		// Unexported methods carry a package qualifier; prefer the one
		// encoded in the name, falling back to the interface's own path.
		m.PkgPath = pkgPath(pname)
		if m.PkgPath == "" {
			m.PkgPath = t.PkgPath.Name()
		}
	}
	m.Type = toType(t.typeOff(p.Typ))
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.Methods) }

// MethodByName returns the method with the given name in the type's method
// set and a boolean indicating whether it was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *abi.Imethod
	for i := range t.Methods {
		p = &t.Methods[i]
		if t.nameOff(p.Name).Name() == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string

	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// IsExported reports whether the field is exported.
func (f StructField) IsExported() bool {
	return f.PkgPath == ""
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	// Each iteration consumes one key:"value" pair from the front of tag.
	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				// Skip the escaped byte; strconv.Unquote below decodes escapes.
				i++
			}
			i++
		}
		if i >= len(tag) {
			// Unterminated quoted value: malformed tag.
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.Fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.Fields[i]
	f.Type = toType(p.Typ)
	f.Name = p.Name.Name()
	f.Anonymous = p.Embedded()
	if !p.Name.IsExported() {
		// Unexported fields carry the struct's package path.
		f.PkgPath = t.PkgPath.Name()
	}
	if tag := p.Name.Tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.Offset

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.Type)
	for i, x := range index {
		if i > 0 {
			// After the first hop, implicitly dereference a pointer-to-struct
			// so index chains can traverse embedded *T fields.
			ft := f.Type
			if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.Fields {
				f := &t.Fields[i]
				// Find name and (for embedded field) type for field f.
				fname := f.Name.Name()
				var ntyp *abi.Type
				if f.Embedded() {
					// Embedded field of type T or *T.
					ntyp = f.Typ
					if ntyp.Kind() == abi.Pointer {
						ntyp = ntyp.Elem()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without embedded fields.
	hasEmbeds := false
	if name != "" {
		for i := range t.Fields {
			tf := &t.Fields[i]
			if tf.Name.Name() == name {
				return t.Field(i), true
			}
			if tf.Embedded() {
				hasEmbeds = true
			}
		}
	}
	if !hasEmbeds {
		return
	}
	// Fall back to the breadth-first search over embedded fields.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection [Type] that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i any) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	// Noescape so this doesn't make i to escape. See the comment
	// at Value.typ for why this is safe.
	return toType((*abi.Type)(noescape(unsafe.Pointer(eface.typ))))
}

// rtypeOf directly extracts the *rtype of the provided value.
func rtypeOf(i any) *abi.Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return eface.typ
}

// ptrMap is the cache for PointerTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
//
// PtrTo is the old spelling of [PointerTo].
// The two functions behave identically.
//
// Deprecated: Superseded by [PointerTo].
func PtrTo(t Type) Type { return PointerTo(t) }

// PointerTo returns the pointer type with element t.
// For example, if t represents type Foo, PointerTo(t) represents *Foo.
func PointerTo(t Type) Type {
	return toRType(t.(*rtype).ptrTo())
}

// ptrTo returns the type descriptor for *t. It consults, in order:
// the type's own PtrToThis link, the ptrMap cache, the types linked
// into the binary, and finally builds a fresh ptrType.
func (t *rtype) ptrTo() *abi.Type {
	at := &t.t
	if at.PtrToThis != 0 {
		return t.typeOff(at.PtrToThis)
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).Type
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p := (*ptrType)(unsafe.Pointer(tt))
		if p.Elem != &t.t {
			// Same string, different type; keep scanning.
			continue
		}
		// LoadOrStore keeps the winner if another goroutine raced us here.
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).Type
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr any = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.Str = resolveReflectName(newName(s, "", false, false))
	pp.PtrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.Hash = fnv1(t.t.Hash, '*')

	pp.Elem = at

	pi, _ := ptrMap.LoadOrStore(t, &pp)
	return &pi.(*ptrType).Type
}

// ptrTo is a convenience wrapper over rtype.ptrTo for *abi.Type values.
func ptrTo(t *abi.Type) *abi.Type {
	return toRType(t).ptrTo()
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.common(), t.common())
}

// AssignableTo reports whether a value of type t is assignable to type u.
// It panics if u is nil.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.common()
	return directlyAssignable(uu, t.common()) || implements(uu, t.common())
}

// ConvertibleTo reports whether a value of type t is convertible to type u.
// It panics if u is nil.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	return convertOp(u.common(), t.common()) != nil
}

// Comparable reports whether values of this type are comparable:
// the runtime records an Equal function only for comparable types.
func (t *rtype) Comparable() bool {
	return t.t.Equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *abi.Type) bool {
	if T.Kind() != abi.Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.Methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == abi.Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.Methods); j++ {
			tm := &t.Methods[i]
			tmName := t.nameOff(tm.Name)
			vm := &v.Methods[j]
			vmName := nameOffFor(V, vm.Name)
			if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
				if !tmName.IsExported() {
					// Unexported methods match only within the same package.
					tmPkgPath := pkgPath(tmName)
					if tmPkgPath == "" {
						tmPkgPath = t.PkgPath.Name()
					}
					vmPkgPath := pkgPath(vmName)
					if vmPkgPath == "" {
						vmPkgPath = v.PkgPath.Name()
					}
					if tmPkgPath != vmPkgPath {
						continue
					}
				}
				if i++; i >= len(t.Methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.Uncommon()
	if v == nil {
		// V has no methods at all, but T has at least one.
		return false
	}
	i := 0
	vmethods := v.Methods()
	for j := 0; j < int(v.Mcount); j++ {
		tm := &t.Methods[i]
		tmName := t.nameOff(tm.Name)
		vm := vmethods[j]
		vmName := nameOffFor(V, vm.Name)
		if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
			if !tmName.IsExported() {
				// Unexported methods match only within the same package.
				tmPkgPath := pkgPath(tmName)
				if tmPkgPath == "" {
					tmPkgPath = t.PkgPath.Name()
				}
				vmPkgPath := pkgPath(vmName)
				if vmPkgPath == "" {
					vmPkgPath = nameOffFor(V, v.PkgPath).Name()
				}
				if tmPkgPath != vmPkgPath {
					continue
				}
			}
			if i++; i >= len(t.Methods) {
				return true
			}
		}
	}
	return false
}

// specialChannelAssignability reports whether a value x of channel type V
// can be directly assigned (using memmove) to another channel type T.
// https://golang.org/doc/go_spec.html#Assignability
// T and V must be both of Chan kind.
func specialChannelAssignability(T, V *abi.Type) bool {
	// Special case:
	// x is a bidirectional channel value, T is a channel type,
	// x's type V and T have identical element types,
	// and at least one of V or T is not a defined type.
	return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *abi.Type) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must not be defined
	// and they must have the same kind.
	if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
		return false
	}

	if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
		return true
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether T and V are identical types.
// With cmpTags, identity is pointer equality of the descriptors
// (struct tags included); without, names, kinds, and package paths
// are compared and tags are ignored.
func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
		return false
	}

	return haveIdenticalUnderlyingType(T, V, false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, following the Go spec's type identity rules.
func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := Kind(T.Kind())
	if kind != Kind(V.Kind()) {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		if t.OutCount != v.OutCount || t.InCount != v.InCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.Methods) == 0 && len(v.Methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Pointer, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.Fields) != len(v.Fields) {
			return false
		}
		if t.PkgPath.Name() != v.PkgPath.Name() {
			return false
		}
		for i := range t.Fields {
			tf := &t.Fields[i]
			vf := &v.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
				return false
			}
			if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Embedded() != vf.Embedded() {
				return false
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff returns the type descriptor at the given offset from a
// typelinks section base pointer.
func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
	return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*abi.Type {
	sections, offset := typelinks()
	var ret []*abi.Type

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := int(uint(i+j) >> 1) // avoid overflow when computing h
			// i ≤ h < j
			if !(stringFor(rtypeOff(section, offs[h])) >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if stringFor(typ) != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *abi.Type
	t2    *abi.Type
	extra uintptr
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.common()

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.Size_ >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + stringFor(typ)
	case RecvDir:
		s = "<-chan " + stringFor(typ)
	case BothDir:
		typeStr := stringFor(typ)
		if typeStr[0] == '<' {
			// typ is recv chan, need parentheses as "<-" associates with leftmost
			// chan possible, see:
			// * https://golang.org/ref/spec#Channel_types
			// * https://github.com/golang/go/issues/39897
			s = "chan (" + typeStr + ")"
		} else {
			s = "chan " + typeStr
		}
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
			// LoadOrStore keeps the first stored value if another goroutine raced us.
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a channel type.
	var ichan any = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.TFlag = abi.TFlagRegularMemory
	ch.Dir = abi.ChanDir(dir)
	ch.Str = resolveReflectName(newName(s, "", false, false))
	ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
	ch.Elem = typ

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
	return ti.(Type)
}

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.common()
	etyp := elem.common()

	// Only comparable types have a runtime Equal function; see rtype.Comparable.
	if ktyp.Equal == nil {
		panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.Key == ktyp && mt.Elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a map type.
	// Note: flag values must match those used in the TMAP case
	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
	var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.Str = resolveReflectName(newName(s, "", false, false))
	mt.TFlag = 0
	mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
	mt.Key = ktyp
	mt.Elem = etyp
	mt.Bucket = bucketOf(ktyp, etyp)
	mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(ktyp, p, seed)
	}
	mt.Flags = 0
	// Oversized keys/values are stored indirectly (as pointers) in the bucket.
	if ktyp.Size_ > abi.MapMaxKeyBytes {
		mt.KeySize = uint8(goarch.PtrSize)
		mt.Flags |= 1 // indirect key
	} else {
		mt.KeySize = uint8(ktyp.Size_)
	}
	if etyp.Size_ > abi.MapMaxElemBytes {
		mt.ValueSize = uint8(goarch.PtrSize)
		mt.Flags |= 2 // indirect value
	} else {
		mt.MapType.ValueSize = uint8(etyp.Size_)
	}
	mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
	if isReflexive(ktyp) {
		mt.Flags |= 4
	}
	if needKeyUpdate(ktyp) {
		mt.Flags |= 8
	}
	if hashMightPanic(ktyp) {
		mt.Flags |= 16
	}
	mt.PtrToThis = 0

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
	return ti.(Type)
}

// funcTypes[n] is the Type of a struct holding a funcType followed by an
// [n]*rtype argument array; FuncOf allocates these to build function type
// descriptors. Both variables are guarded by funcTypesMutex.
var funcTypes []Type
var funcTypesMutex sync.Mutex

// initFuncTypes returns the cached struct layout for a function type with
// n argument/result slots, creating and caching it on first use.
func initFuncTypes(n int) Type {
	funcTypesMutex.Lock()
	defer funcTypesMutex.Unlock()
	if n >= len(funcTypes) {
		// Grow the cache slice to hold index n.
		newFuncTypes := make([]Type, n+1)
		copy(newFuncTypes, funcTypes)
		funcTypes = newFuncTypes
	}
	if funcTypes[n] != nil {
		return funcTypes[n]
	}

	funcTypes[n] = StructOf([]StructField{
		{
			Name: "FuncType",
			Type: TypeOf(funcType{}),
		},
		{
			Name: "Args",
			Type: ArrayOf(n, TypeOf(&rtype{})),
		},
	})
	return funcTypes[n]
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc any = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	if n > 128 {
		panic("reflect.FuncOf: too many arguments")
	}

	// Allocate a funcType with room for n trailing *rtype argument slots
	// via the cached struct layout from initFuncTypes.
	o := New(initFuncTypes(n)).Elem()
	ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer()))
	args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n]
	*ft = *prototype

	// Build a hash and minimally populate ft.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
	}

	ft.TFlag = 0
	ft.Hash = hash
	ft.InCount = uint16(len(in))
	// The top bit of OutCount marks a variadic function.
	ft.OutCount = uint16(len(out))
	if variadic {
		ft.OutCount |= 1 << 15
	}

	// Look in cache.
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*abi.Type) {
			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
				return toRType(t)
			}
		}
	}

	// Not in cache, lock and retry.
	// Re-checking under the lock avoids storing a duplicate entry if another
	// goroutine inserted the same type between the load above and here.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*abi.Type) {
			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
				return toRType(t)
			}
		}
	}

	addToCache := func(tt *abi.Type) Type {
		var rts []*abi.Type
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*abi.Type)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return toType(tt)
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
			return addToCache(tt)
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.Str = resolveReflectName(newName(str, "", false, false))
	ft.PtrToThis = 0
	return addToCache(&ft.Type)
}

// stringFor returns the string representation of the type t.
func stringFor(t *abi.Type) string {
	return toRType(t).String()
}

// funcStr builds a string representation of a funcType.
func funcStr(ft *funcType) string {
	// The result must match the compiler's type-string format exactly,
	// since FuncOf uses it as a lookup key in typesByString.
	repr := make([]byte, 0, 64)
	repr = append(repr, "func("...)
	for i, t := range ft.InSlice() {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		if ft.IsVariadic() && i == int(ft.InCount)-1 {
			// Render the final []T input of a variadic function as ...T.
			repr = append(repr, "..."...)
			repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
		} else {
			repr = append(repr, stringFor(t)...)
		}
	}
	repr = append(repr, ')')
	out := ft.OutSlice()
	// A single result is unparenthesized; multiple results are wrapped in (...).
	if len(out) == 1 {
		repr = append(repr, ' ')
	} else if len(out) > 1 {
		repr = append(repr, " ("...)
	}
	for i, t := range out {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		repr = append(repr, stringFor(t)...)
	}
	if len(out) > 1 {
		repr = append(repr, ')')
	}
	return string(repr)
}

// isReflexive reports whether the == operation on the type is reflexive.
// That is, x == x for all values x of type t.
func isReflexive(t *abi.Type) bool {
	switch Kind(t.Kind()) {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
		return true
	case Float32, Float64, Complex64, Complex128, Interface:
		// NaN != NaN, and interfaces may contain floats.
		return false
	case Array:
		// An array is reflexive iff its element type is.
		tt := (*arrayType)(unsafe.Pointer(t))
		return isReflexive(tt.Elem)
	case Struct:
		// A struct is reflexive iff every field type is.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.Fields {
			if !isReflexive(f.Typ) {
				return false
			}
		}
		return true
	default:
		// Func, Map, Slice, Invalid
		panic("isReflexive called on non-key type " + stringFor(t))
	}
}

// needKeyUpdate reports whether map overwrites require the key to be copied.
1970 func needKeyUpdate(t *abi.Type) bool { 1971 switch Kind(t.Kind()) { 1972 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer: 1973 return false 1974 case Float32, Float64, Complex64, Complex128, Interface, String: 1975 // Float keys can be updated from +0 to -0. 1976 // String keys can be updated to use a smaller backing store. 1977 // Interfaces might have floats or strings in them. 1978 return true 1979 case Array: 1980 tt := (*arrayType)(unsafe.Pointer(t)) 1981 return needKeyUpdate(tt.Elem) 1982 case Struct: 1983 tt := (*structType)(unsafe.Pointer(t)) 1984 for _, f := range tt.Fields { 1985 if needKeyUpdate(f.Typ) { 1986 return true 1987 } 1988 } 1989 return false 1990 default: 1991 // Func, Map, Slice, Invalid 1992 panic("needKeyUpdate called on non-key type " + stringFor(t)) 1993 } 1994 } 1995 1996 // hashMightPanic reports whether the hash of a map key of type t might panic. 1997 func hashMightPanic(t *abi.Type) bool { 1998 switch Kind(t.Kind()) { 1999 case Interface: 2000 return true 2001 case Array: 2002 tt := (*arrayType)(unsafe.Pointer(t)) 2003 return hashMightPanic(tt.Elem) 2004 case Struct: 2005 tt := (*structType)(unsafe.Pointer(t)) 2006 for _, f := range tt.Fields { 2007 if hashMightPanic(f.Typ) { 2008 return true 2009 } 2010 } 2011 return false 2012 default: 2013 return false 2014 } 2015 } 2016 2017 func bucketOf(ktyp, etyp *abi.Type) *abi.Type { 2018 if ktyp.Size_ > abi.MapMaxKeyBytes { 2019 ktyp = ptrTo(ktyp) 2020 } 2021 if etyp.Size_ > abi.MapMaxElemBytes { 2022 etyp = ptrTo(etyp) 2023 } 2024 2025 // Prepare GC data if any. 2026 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes, 2027 // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap. 2028 // Note that since the key and value are known to be <= 128 bytes, 2029 // they're guaranteed to have bitmaps instead of GC programs. 
2030 var gcdata *byte 2031 var ptrdata uintptr 2032 2033 size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize 2034 if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 { 2035 panic("reflect: bad size computation in MapOf") 2036 } 2037 2038 if ktyp.Pointers() || etyp.Pointers() { 2039 nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize 2040 n := (nptr + 7) / 8 2041 2042 // Runtime needs pointer masks to be a multiple of uintptr in size. 2043 n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) 2044 mask := make([]byte, n) 2045 base := uintptr(abi.MapBucketCount / goarch.PtrSize) 2046 2047 if ktyp.Pointers() { 2048 emitGCMask(mask, base, ktyp, abi.MapBucketCount) 2049 } 2050 base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize 2051 2052 if etyp.Pointers() { 2053 emitGCMask(mask, base, etyp, abi.MapBucketCount) 2054 } 2055 base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize 2056 2057 word := base 2058 mask[word/8] |= 1 << (word % 8) 2059 gcdata = &mask[0] 2060 ptrdata = (word + 1) * goarch.PtrSize 2061 2062 // overflow word must be last 2063 if ptrdata != size { 2064 panic("reflect: bad layout computation in MapOf") 2065 } 2066 } 2067 2068 b := &abi.Type{ 2069 Align_: goarch.PtrSize, 2070 Size_: size, 2071 Kind_: uint8(Struct), 2072 PtrBytes: ptrdata, 2073 GCData: gcdata, 2074 } 2075 s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")" 2076 b.Str = resolveReflectName(newName(s, "", false, false)) 2077 return b 2078 } 2079 2080 func (t *rtype) gcSlice(begin, end uintptr) []byte { 2081 return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end] 2082 } 2083 2084 // emitGCMask writes the GC mask for [n]typ into out, starting at bit 2085 // offset base. 
func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
	// Callers must only pass types described by a pointer bitmap,
	// never by a GC program.
	if typ.Kind_&kindGCProg != 0 {
		panic("reflect: unexpected GC program")
	}
	ptrs := typ.PtrBytes / goarch.PtrSize  // pointer words in one element
	words := typ.Size_ / goarch.PtrSize    // total words in one element
	mask := typ.GcSlice(0, (ptrs+7)/8)
	// For each pointer bit in the element's mask, replicate it once per
	// array element at the element's stride.
	for j := uintptr(0); j < ptrs; j++ {
		if (mask[j/8]>>(j%8))&1 != 0 {
			for i := uintptr(0); i < n; i++ {
				k := base + i*words + j
				out[k/8] |= 1 << (k % 8)
			}
		}
	}
}

// appendGCProg appends the GC program for the first ptrdata bytes of
// typ to dst and returns the extended slice.
func appendGCProg(dst []byte, typ *abi.Type) []byte {
	if typ.Kind_&kindGCProg != 0 {
		// Element has GC program; emit one element.
		// GCData starts with a 4-byte program length; the program
		// proper follows (minus its trailing terminator byte).
		n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
		prog := typ.GcSlice(4, 4+n-1)
		return append(dst, prog...)
	}

	// Element is small with pointer mask; use as literal bits.
	ptrs := typ.PtrBytes / goarch.PtrSize
	mask := typ.GcSlice(0, (ptrs+7)/8)

	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
	for ; ptrs > 120; ptrs -= 120 {
		dst = append(dst, 120)
		dst = append(dst, mask[:15]...)
		mask = mask[15:]
	}

	dst = append(dst, byte(ptrs))
	dst = append(dst, mask...)
	return dst
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.common()

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + stringFor(typ)
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.Elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make a slice type by copying a prototype []unsafe.Pointer type
	// descriptor and fixing up its element, name, and hash.
	var islice any = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.TFlag = 0
	slice.Str = resolveReflectName(newName(s, "", false, false))
	slice.Hash = fnv1(typ.Hash, '[')
	slice.Elem = typ
	slice.PtrToThis = 0

	// LoadOrStore so concurrent callers agree on a single descriptor.
	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
	return ti.(Type)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}

// structTypeUncommon is a structType with space for uncommon (method)
// data directly following it in memory, matching the runtime layout.
type structTypeUncommon struct {
	structType
	u uncommonType
}

// isLetter reports whether a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		// First rune must be a letter (or '_'); the rest may also be digits.
		if i == 0 && !isLetter(c) {
			return false
		}

		if !(isLetter(c) || unicode.IsDigit(c)) {
			return false
		}
	}

	// The empty string is not a valid identifier.
	return len(fieldName) > 0
}

// isRegularMemory reports whether t can be compared with plain memcmp,
// i.e. it has no padding, blank fields, or special equality semantics.
// This must match cmd/compile/internal/compare.IsRegularMemory
func isRegularMemory(t Type) bool {
	switch t.Kind() {
	case Array:
		elem := t.Elem()
		if isRegularMemory(elem) {
			return true
		}
		// A zero-length array of a comparable element is trivially
		// regular memory (nothing to compare).
		return elem.Comparable() && t.Len() == 0
	case Int8, Int16, Int32, Int64, Int, Uint8, Uint16, Uint32, Uint64, Uint, Uintptr, Chan, Pointer, Bool, UnsafePointer:
		return true
	case Struct:
		num := t.NumField()
		switch num {
		case 0:
			return true
		case 1:
			field := t.Field(0)
			if field.Name == "_" {
				return false
			}
			return isRegularMemory(field.Type)
		default:
			// All fields must be regular memory, non-blank, and
			// not followed by padding.
			for i := range num {
				field := t.Field(i)
				if field.Name == "_" || !isRegularMemory(field.Type) || isPaddedField(t, i) {
					return false
				}
			}
			return true
		}
	}
	return false
}

// isPaddedField reports whether the i'th field of struct type t is followed
// by padding.
func isPaddedField(t Type, i int) bool {
	field := t.Field(i)
	if i+1 < t.NumField() {
		return field.Offset+field.Type.Size() != t.Field(i+1).Offset
	}
	return field.Offset+field.Type.Size() != t.Size()
}

// StructOf returns the struct type containing fields.
// The Offset and Index fields are ignored and computed as they would be
// by the compiler.
//
// StructOf currently does not support promoted methods of embedded fields
// and panics if passed unexported StructFields.
func StructOf(fields []StructField) Type {
	var (
		hash       = fnv1(0, []byte("struct {")...)
		size       uintptr
		typalign   uint8
		comparable = true
		methods    []abi.Method

		fs   = make([]structField, len(fields))
		repr = make([]byte, 0, 64)
		fset = map[string]struct{}{} // fields' names

		hasGCProg = false // records whether a struct-field type has a GCProg
	)

	lastzero := uintptr(0)
	repr = append(repr, "struct {"...)
	pkgpath := ""
	for i, field := range fields {
		// Validate the field before converting it.
		if field.Name == "" {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
		}
		if !isValidFieldName(field.Name) {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
		}
		if field.Type == nil {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
		}
		f, fpkgpath := runtimeStructField(field)
		ft := f.Typ
		if ft.Kind_&kindGCProg != 0 {
			hasGCProg = true
		}
		if fpkgpath != "" {
			// All unexported fields must come from the same package.
			if pkgpath == "" {
				pkgpath = fpkgpath
			} else if pkgpath != fpkgpath {
				panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
			}
		}

		// Update string and hash
		name := f.Name.Name()
		hash = fnv1(hash, []byte(name)...)
		repr = append(repr, (" " + name)...)
		if f.Embedded() {
			// Embedded field
			if f.Typ.Kind() == abi.Pointer {
				// Embedded ** and *interface{} are illegal
				elem := ft.Elem()
				if k := elem.Kind(); k == abi.Pointer || k == abi.Interface {
					panic("reflect.StructOf: illegal embedded field type " + stringFor(ft))
				}
			}

			// Collect methods promoted from the embedded type.
			switch Kind(f.Typ.Kind()) {
			case Interface:
				ift := (*interfaceType)(unsafe.Pointer(ft))
				for _, m := range ift.Methods {
					if pkgPath(ift.nameOff(m.Name)) != "" {
						// TODO(sbinet). Issue 15924.
						panic("reflect: embedded interface with unexported method(s) not implemented")
					}

					// Interface methods have no implementation here; point
					// both entry points at a stub that panics if called.
					fnStub := resolveReflectText(unsafe.Pointer(abi.FuncPCABIInternal(embeddedIfaceMethStub)))
					methods = append(methods, abi.Method{
						Name: resolveReflectName(ift.nameOff(m.Name)),
						Mtyp: resolveReflectType(ift.typeOff(m.Typ)),
						Ifn:  fnStub,
						Tfn:  fnStub,
					})
				}
			case Pointer:
				ptr := (*ptrType)(unsafe.Pointer(ft))
				if unt := ptr.Uncommon(); unt != nil {
					if i > 0 && unt.Mcount > 0 {
						// Issue 15924.
						panic("reflect: embedded type with methods not implemented if type is not first field")
					}
					if len(fields) > 1 {
						panic("reflect: embedded type with methods not implemented if there is more than one field")
					}
					for _, m := range unt.Methods() {
						mname := nameOffFor(ft, m.Name)
						if pkgPath(mname) != "" {
							// TODO(sbinet).
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, abi.Method{
							Name: resolveReflectName(mname),
							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
						})
					}
				}
				// Also promote methods declared on the pointed-to type.
				if unt := ptr.Elem.Uncommon(); unt != nil {
					for _, m := range unt.Methods() {
						mname := nameOffFor(ft, m.Name)
						if pkgPath(mname) != "" {
							// TODO(sbinet)
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, abi.Method{
							Name: resolveReflectName(mname),
							Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)),
							Ifn:  resolveReflectText(textOffFor(ptr.Elem, m.Ifn)),
							Tfn:  resolveReflectText(textOffFor(ptr.Elem, m.Tfn)),
						})
					}
				}
			default:
				if unt := ft.Uncommon(); unt != nil {
					if i > 0 && unt.Mcount > 0 {
						// Issue 15924.
						panic("reflect: embedded type with methods not implemented if type is not first field")
					}
					if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 {
						panic("reflect: embedded type with methods not implemented for non-pointer type")
					}
					for _, m := range unt.Methods() {
						mname := nameOffFor(ft, m.Name)
						if pkgPath(mname) != "" {
							// TODO(sbinet)
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, abi.Method{
							Name: resolveReflectName(mname),
							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
						})

					}
				}
			}
		}
		if _, dup := fset[name]; dup && name != "_" {
			panic("reflect.StructOf: duplicate field " + name)
		}
		fset[name] = struct{}{}

		hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash))

		repr = append(repr, (" " + stringFor(ft))...)
		if f.Name.HasTag() {
			// The tag participates in both the printed form and the hash.
			hash = fnv1(hash, []byte(f.Name.Tag())...)
			repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...)
		}
		if i < len(fields)-1 {
			repr = append(repr, ';')
		}

		comparable = comparable && (ft.Equal != nil)

		// Lay the field out at the next properly aligned offset,
		// checking for address-space overflow at each step.
		offset := align(size, uintptr(ft.Align_))
		if offset < size {
			panic("reflect.StructOf: struct size would exceed virtual address space")
		}
		if ft.Align_ > typalign {
			typalign = ft.Align_
		}
		size = offset + ft.Size_
		if size < offset {
			panic("reflect.StructOf: struct size would exceed virtual address space")
		}
		f.Offset = offset

		if ft.Size_ == 0 {
			lastzero = size
		}

		fs[i] = f
	}

	if size > 0 && lastzero == size {
		// This is a non-zero sized struct that ends in a
		// zero-sized field. We add an extra byte of padding,
		// to ensure that taking the address of the final
		// zero-sized field can't manufacture a pointer to the
		// next object in the heap. See issue 9401.
		size++
		if size == 0 {
			panic("reflect.StructOf: struct size would exceed virtual address space")
		}
	}

	var typ *structType
	var ut *uncommonType

	if len(methods) == 0 {
		t := new(structTypeUncommon)
		typ = &t.structType
		ut = &t.u
	} else {
		// A *rtype representing a struct is followed directly in memory by an
		// array of method objects representing the methods attached to the
		// struct. To get the same layout for a run time generated type, we
		// need an array directly following the uncommonType memory.
		// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
		tt := New(StructOf([]StructField{
			{Name: "S", Type: TypeOf(structType{})},
			{Name: "U", Type: TypeOf(uncommonType{})},
			{Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
		}))

		typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
		ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())

		copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]abi.Method), methods)
	}
	// TODO(sbinet): Once we allow embedding multiple types,
	// methods will need to be sorted like the compiler does.
	// TODO(sbinet): Once we allow non-exported methods, we will
	// need to compute xcount as the number of exported methods.
	ut.Mcount = uint16(len(methods))
	ut.Xcount = ut.Mcount
	ut.Moff = uint32(unsafe.Sizeof(uncommonType{}))

	if len(fs) > 0 {
		repr = append(repr, ' ')
	}
	repr = append(repr, '}')
	hash = fnv1(hash, '}')
	str := string(repr)

	// Round the size up to be a multiple of the alignment.
	s := align(size, uintptr(typalign))
	if s < size {
		panic("reflect.StructOf: struct size would exceed virtual address space")
	}
	size = s

	// Make the struct type by copying the prototype struct{} descriptor.
	var istruct any = struct{}{}
	prototype := *(**structType)(unsafe.Pointer(&istruct))
	*typ = *prototype
	typ.Fields = fs
	if pkgpath != "" {
		typ.PkgPath = newName(pkgpath, "", false, false)
	}

	// Look in cache.
	if ts, ok := structLookupCache.m.Load(hash); ok {
		for _, st := range ts.([]Type) {
			t := st.common()
			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
				return toType(t)
			}
		}
	}

	// Not in cache, lock and retry.
	structLookupCache.Lock()
	defer structLookupCache.Unlock()
	if ts, ok := structLookupCache.m.Load(hash); ok {
		for _, st := range ts.([]Type) {
			t := st.common()
			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
				return toType(t)
			}
		}
	}

	// addToCache appends t to the hash bucket (append-only; safe for
	// concurrent readers) and returns it.
	addToCache := func(t Type) Type {
		var ts []Type
		if ti, ok := structLookupCache.m.Load(hash); ok {
			ts = ti.([]Type)
		}
		structLookupCache.m.Store(hash, append(ts, t))
		return t
	}

	// Look in known types.
	for _, t := range typesByString(str) {
		if haveIdenticalUnderlyingType(&typ.Type, t, true) {
			// even if 't' wasn't a structType with methods, we should be ok
			// as the 'u uncommonType' field won't be accessed except when
			// tflag&abi.TFlagUncommon is set.
			return addToCache(toType(t))
		}
	}

	typ.Str = resolveReflectName(newName(str, "", false, false))
	if isRegularMemory(toType(&typ.Type)) {
		typ.TFlag = abi.TFlagRegularMemory
	} else {
		typ.TFlag = 0
	}
	typ.Hash = hash
	typ.Size_ = size
	typ.PtrBytes = typeptrdata(&typ.Type)
	typ.Align_ = typalign
	typ.FieldAlign_ = typalign
	typ.PtrToThis = 0
	if len(methods) > 0 {
		typ.TFlag |= abi.TFlagUncommon
	}

	if hasGCProg {
		// At least one field is described by a GC program, so the whole
		// struct must be described by one: concatenate per-field programs,
		// padding the gaps between pointer-bearing fields with zero bits.
		lastPtrField := 0
		for i, ft := range fs {
			if ft.Typ.Pointers() {
				lastPtrField = i
			}
		}
		prog := []byte{0, 0, 0, 0} // will be length of prog
		var off uintptr
		for i, ft := range fs {
			if i > lastPtrField {
				// gcprog should not include anything for any field after
				// the last field that contains pointer data
				break
			}
			if !ft.Typ.Pointers() {
				// Ignore pointerless fields.
				continue
			}
			// Pad to start of this field with zeros.
			if ft.Offset > off {
				n := (ft.Offset - off) / goarch.PtrSize
				prog = append(prog, 0x01, 0x00) // emit a 0 bit
				if n > 1 {
					prog = append(prog, 0x81)      // repeat previous bit
					prog = appendVarint(prog, n-1) // n-1 times
				}
				off = ft.Offset
			}

			prog = appendGCProg(prog, ft.Typ)
			off += ft.Typ.PtrBytes
		}
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		typ.Kind_ |= kindGCProg
		typ.GCData = &prog[0]
	} else {
		// Pointer bitmap suffices; build it field by field.
		typ.Kind_ &^= kindGCProg
		bv := new(bitVector)
		addTypeBits(bv, 0, &typ.Type)
		if len(bv.data) > 0 {
			typ.GCData = &bv.data[0]
		}
	}
	typ.Equal = nil
	if comparable {
		// Field-by-field equality using each field type's own Equal.
		typ.Equal = func(p, q unsafe.Pointer) bool {
			for _, ft := range typ.Fields {
				pi := add(p, ft.Offset, "&x.field safe")
				qi := add(q, ft.Offset, "&x.field safe")
				if !ft.Typ.Equal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case len(fs) == 1 && !ifaceIndir(fs[0].Typ):
		// structs of 1 direct iface type can be direct
		typ.Kind_ |= kindDirectIface
	default:
		typ.Kind_ &^= kindDirectIface
	}

	return addToCache(toType(&typ.Type))
}

// embeddedIfaceMethStub is the stand-in entry point installed for methods
// promoted from an embedded interface by StructOf; calling one panics.
func embeddedIfaceMethStub() {
	panic("reflect: StructOf does not support methods of embedded interfaces")
}

// runtimeStructField takes a StructField value passed to StructOf and
// returns both the corresponding internal representation, of type
// structField, and the pkgpath value to use for this field.
func runtimeStructField(field StructField) (structField, string) {
	if field.Anonymous && field.PkgPath != "" {
		panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
	}

	if field.IsExported() {
		// Best-effort check for misuse.
		// Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
		c := field.Name[0]
		if 'a' <= c && c <= 'z' || c == '_' {
			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
		}
	}

	resolveReflectType(field.Type.common()) // install in runtime
	f := structField{
		Name:   newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
		Typ:    field.Type.common(),
		Offset: 0, // computed later by StructOf
	}
	return f, field.PkgPath
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
func typeptrdata(t *abi.Type) uintptr {
	switch t.Kind() {
	case abi.Struct:
		st := (*structType)(unsafe.Pointer(t))
		// find the last field that has pointers.
		field := -1
		for i := range st.Fields {
			ft := st.Fields[i].Typ
			if ft.Pointers() {
				field = i
			}
		}
		if field == -1 {
			return 0
		}
		f := st.Fields[field]
		return f.Offset + f.Typ.PtrBytes

	default:
		panic("reflect.typeptrdata: unexpected type, " + stringFor(t))
	}
}

// ArrayOf returns the array type with the given length and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(length int, elem Type) Type {
	if length < 0 {
		panic("reflect: negative length passed to ArrayOf")
	}

	typ := elem.common()

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(length)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
	for _, tt := range typesByString(s) {
		array := (*arrayType)(unsafe.Pointer(tt))
		if array.Elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make an array type by copying the prototype [1]unsafe.Pointer
	// descriptor and fixing up its metadata.
	var iarray any = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.TFlag = typ.TFlag & abi.TFlagRegularMemory
	array.Str = resolveReflectName(newName(s, "", false, false))
	array.Hash = fnv1(typ.Hash, '[')
	// Mix the length into the hash, one byte at a time.
	for n := uint32(length); n > 0; n >>= 8 {
		array.Hash = fnv1(array.Hash, byte(n))
	}
	array.Hash = fnv1(array.Hash, ']')
	array.Elem = typ
	array.PtrToThis = 0
	if typ.Size_ > 0 {
		max := ^uintptr(0) / typ.Size_
		if uintptr(length) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.Size_ = typ.Size_ * uintptr(length)
	if length > 0 && typ.Pointers() {
		// Pointer data ends within the last element.
		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
	}
	array.Align_ = typ.Align_
	array.FieldAlign_ = typ.FieldAlign_
	array.Len = uintptr(length)
	array.Slice = &(SliceOf(elem).(*rtype).t)

	// Build the GC metadata, choosing the cheapest representation.
	switch {
	case !typ.Pointers() || array.Size_ == 0:
		// No pointers.
		array.GCData = nil
		array.PtrBytes = 0

	case length == 1:
		// In memory, 1-element array looks just like the element.
		array.Kind_ |= typ.Kind_ & kindGCProg
		array.GCData = typ.GCData
		array.PtrBytes = typ.PtrBytes

	case typ.Kind_&kindGCProg == 0 && array.Size_ <= abi.MaxPtrmaskBytes*8*goarch.PtrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into length 1 bits in larger mask.
		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
		// Runtime needs pointer masks to be a multiple of uintptr in size.
		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		mask := make([]byte, n)
		emitGCMask(mask, 0, typ, array.Len)
		array.GCData = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		prog = appendGCProg(prog, typ)
		// Pad from ptrdata to size.
		elemPtrs := typ.PtrBytes / goarch.PtrSize
		elemWords := typ.Size_ / goarch.PtrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat length-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(length)-1)
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.Kind_ |= kindGCProg
		array.GCData = &prog[0]
		array.PtrBytes = array.Size_ // overestimate but ok; must match program
	}

	etyp := typ
	esize := etyp.Size()

	array.Equal = nil
	if eequal := etyp.Equal; eequal != nil {
		// Element-by-element equality using the element's Equal.
		array.Equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < length; i++ {
				pi := arrayAt(p, i, esize, "i < length")
				qi := arrayAt(q, i, esize, "i < length")
				if !eequal(pi, qi) {
					return false
				}

			}
			return true
		}
	}

	switch {
	case length == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.Kind_ |= kindDirectIface
	default:
		array.Kind_ &^= kindDirectIface
	}

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type))
	return ti.(Type)
}

// appendVarint appends v to x in the GC program's varint encoding
// (7 bits per byte, low bits first, high bit set on all but the last byte).
func appendVarint(x []byte, v uintptr) []byte {
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	x = append(x, byte(v))
	return x
}

// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.
func toType(t *abi.Type) Type {
	if t == nil {
		return nil
	}
	return toRType(t)
}

// layoutKey identifies a cached funcLayout result.
type layoutKey struct {
	ftyp *funcType // function signature
	rcvr *abi.Type // receiver type, or nil if none
}

// layoutType is the cached result of funcLayout.
type layoutType struct {
	t         *abi.Type
	framePool *sync.Pool
	abid      abiDesc
}

var layoutCache sync.Map // map[layoutKey]layoutType

// funcLayout computes a struct type representing the layout of the
// stack-assigned function arguments and return values for the function
// type t.
// If rcvr != nil, rcvr specifies the type of the receiver.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
	if t.Kind() != abi.Func {
		panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
	}
	if rcvr != nil && rcvr.Kind() == abi.Interface {
		panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
	}
	k := layoutKey{t, rcvr}
	if lti, ok := layoutCache.Load(k); ok {
		lt := lti.(layoutType)
		return lt.t, lt.framePool, lt.abid
	}

	// Compute the ABI layout.
	abid = newAbiDesc(t, rcvr)

	// build dummy rtype holding gc program
	x := &abi.Type{
		Align_: goarch.PtrSize,
		// Don't add spill space here; it's only necessary in
		// reflectcall's frame, not in the allocated frame.
		// TODO(mknyszek): Remove this comment when register
		// spill space in the frame is no longer required.
		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
	}
	if abid.stackPtrs.n > 0 {
		x.GCData = &abid.stackPtrs.data[0]
	}

	var s string
	if rcvr != nil {
		s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
	} else {
		s = "funcargs(" + stringFor(&t.Type) + ")"
	}
	x.Str = resolveReflectName(newName(s, "", false, false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() any {
		return unsafe_New(x)
	}}
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		framePool: framePool,
		abid:      abid,
	})
	lt := lti.(layoutType)
	return lt.t, lt.framePool, lt.abid
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *abi.Type) bool {
	return t.Kind_&kindDirectIface == 0
}

// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	if bv.n%(8*goarch.PtrSize) == 0 {
		// Runtime needs pointer masks to be a multiple of uintptr in size.
		// Since reflect passes bv.data directly to the runtime as a pointer mask,
		// we append a full uintptr of zeros at a time.
		for i := 0; i < goarch.PtrSize; i++ {
			bv.data = append(bv.data, 0)
		}
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}

// addTypeBits records in bv the pointer words of a value of type t placed
// at the given byte offset, padding with 0 bits up to the offset as needed.
func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
	if !t.Pointers() {
		return
	}

	switch Kind(t.Kind_ & kindMask) {
	case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.Len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.Fields {
			f := &tt.Fields[i]
			addTypeBits(bv, offset+f.Offset, f.Typ)
		}
	}
}

// TypeFor returns the [Type] that represents the type argument T.
func TypeFor[T any]() Type {
	var v T
	if t := TypeOf(v); t != nil {
		return t // optimize for T being a non-interface kind
	}
	return TypeOf((*T)(nil)).Elem() // only for an interface kind
}