// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import "unsafe"

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
type tflag uint8

const (
	tflagUncommon  tflag = 1 << 0 // an uncommontype follows the outer type structure
	tflagExtraStar tflag = 1 << 1 // the name in str has an extraneous leading "*"
	tflagNamed     tflag = 1 << 2 // the type has a name (name() returns non-empty)
)

// _type is the runtime representation of a Go type.
//
// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
type _type struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32
	tflag      tflag
	align      uint8
	fieldalign uint8
	kind       uint8
	alg        *typeAlg
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff
	ptrToThis typeOff
}

// string returns the type's string representation, stripping the
// compiler-added leading "*" when tflagExtraStar is set (the linker
// stores "*T" and shares it for both T and *T to save space).
func (t *_type) string() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

// uncommon returns the uncommontype that the compiler laid out
// immediately after the kind-specific type structure, or nil if the
// type has none. The per-kind wrapper structs below mirror that
// layout so plain field access finds the trailing uncommontype.
func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

// name returns the type's short name: the portion of the string
// representation after the last '.', or "" for unnamed types.
func (t *_type) name() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	s := t.string()
	// Scan backwards for the last '.' separating package path from name.
	i := len(s) - 1
	for i >= 0 {
		if s[i] == '.' {
			break
		}
		i--
	}
	return s[i+1:]
}

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
134 func (t *_type) pkgpath() string { 135 if u := t.uncommon(); u != nil { 136 return t.nameOff(u.pkgpath).name() 137 } 138 switch t.kind & kindMask { 139 case kindStruct: 140 st := (*structtype)(unsafe.Pointer(t)) 141 return st.pkgPath.name() 142 case kindInterface: 143 it := (*interfacetype)(unsafe.Pointer(t)) 144 return it.pkgpath.name() 145 } 146 return "" 147 } 148 149 // reflectOffs holds type offsets defined at run time by the reflect package. 150 // 151 // When a type is defined at run time, its *rtype data lives on the heap. 152 // There are a wide range of possible addresses the heap may use, that 153 // may not be representable as a 32-bit offset. Moreover the GC may 154 // one day start moving heap memory, in which case there is no stable 155 // offset that can be defined. 156 // 157 // To provide stable offsets, we add pin *rtype objects in a global map 158 // and treat the offset as an identifier. We use negative offsets that 159 // do not overlap with any compile-time module offsets. 160 // 161 // Entries are created by reflect.addReflectOff. 
var reflectOffs struct {
	lock mutex
	next int32                       // next negative offset identifier to hand out
	m    map[int32]unsafe.Pointer    // offset -> pinned heap pointer
	minv map[unsafe.Pointer]int32    // inverse map for de-duplication
}

// reflectOffsLock acquires the reflectOffs lock, informing the race
// detector so reflect-side accesses are ordered with runtime-side ones.
func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

// resolveNameOff converts a name offset, relative to the module that
// contains ptrInModule, into a name. Offsets not covered by any module's
// type section are looked up in reflectOffs (run-time defined names).
// Throws if the offset is out of range.
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	// Find the module whose types section contains the base pointer;
	// the offset is then relative to that module's types start.
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{(*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. see if it is a run time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{(*byte)(res)}
}

// nameOff resolves off relative to the module containing t.
func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}

// resolveTypeOff converts a type offset, relative to the module that
// contains ptrInModule, into a *_type. Run-time defined types are found
// through reflectOffs; types from later shared-build modules are
// de-duplicated through the module's typemap. Throws if out of range.
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 {
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module's type section: must be a run-time type
		// registered by reflect.addReflectOff.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	// Prefer the canonical type chosen by typelinksinit, if any.
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

// typeOff resolves off relative to the module containing t.
func (t *_type) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t), off)
}

// textOff converts a method text offset into an executable address,
// accounting for multiple text sections and run-time defined types.
// Throws if the offset is out of range.
func (t *_type) textOff(off textOff) unsafe.Pointer {
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Run-time type: reflectOffs holds the method address directly.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream is generated as one large buffer. The off (offset) for a method is
	// its offset within this buffer. If the total text size gets too large, there can be issues on platforms like ppc64 if
	// the target of calls are too far for the call instruction. To resolve the large text issue, the text is split
	// into multiple text sections to allow the linker to generate long calls when necessary. When this happens, the vaddr
	// for each text section is set to its offset within the text. Each method's offset is compared against the section
	// vaddrs and sizes to determine the containing section. Then the section relative offset is added to the section's
	// relocated baseaddr to compute the method addess.

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) <= sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}

// in returns the slice of input parameter types, which the compiler
// lays out immediately after the functype (and any uncommontype).
func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}

// out returns the slice of output parameter types, stored directly
// after the input parameter types.
func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	// The top bit of outCount is the variadic flag; mask it off.
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

// dotdotdot reports whether the function is variadic (top bit of outCount).
func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}

// Offsets relative to a module's types, text, or name sections.
type nameOff int32
type typeOff int32
type textOff int32

// method is the layout of a method record following an uncommontype.
type method struct {
	name nameOff // name of method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call (one-word receiver)
	tfn  textOff // fn used for normal method call
}

type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	xcount  uint16 // number of exported methods
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}

// imethod is one method of an interface type.
type imethod struct {
	name nameOff
	ityp typeOff
}

type interfacetype struct {
	typ     _type
	pkgpath name
	mhdr    []imethod
}

type maptype struct {
	typ        _type
	key        *_type
	elem       *_type
	bucket     *_type // internal type representing a hash bucket
	keysize    uint8  // size of key slot
	valuesize  uint8  // size of value slot
	bucketsize uint16 // size of bucket
	flags      uint32
}

// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
	return mt.flags&1 != 0
}
func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself
	return mt.flags&2 != 0
}
func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
	return mt.flags&4 != 0
}
func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
	return mt.flags&8 != 0
}
func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
	return mt.flags&16 != 0
}

type arraytype struct {
	typ   _type
	elem  *_type
	slice *_type // the slice type []elem
	len   uintptr
}

type chantype struct {
	typ  _type
	elem *_type
	dir  uintptr // channel direction
}

type slicetype struct {
	typ  _type
	elem *_type
}

type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16 // top bit is the variadic flag; see dotdotdot
}

type ptrtype struct {
	typ  _type
	elem *_type
}

type structfield struct {
	name       name
	typ        *_type
	offsetAnon uintptr // byte offset << 1 | isAnonymous
}

// offset returns the field's byte offset within the struct; the low
// bit of offsetAnon is the embedded (anonymous) flag.
func (f *structfield) offset() uintptr {
	return f.offsetAnon >> 1
}

type structtype struct {
	typ     _type
	pkgPath name
	fields  []structfield
}

// name is an encoded type name with optional extra data.
// See reflect/type.go for details.
// name points at an encoded name record:
//	byte 0:     flags (1<<0 exported, 1<<1 has tag, 1<<2 has pkgPath)
//	bytes 1-2:  name length, big-endian uint16
//	then the name bytes, optionally followed by a 2-byte tag length and
//	tag bytes, optionally followed by a 4-byte nameOff of the pkgPath.
type name struct {
	bytes *byte
}

// data returns a pointer off bytes into the record.
func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}

func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

// nameLen decodes the big-endian name length from bytes 1-2.
func (n name) nameLen() int {
	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
}

// tagLen decodes the tag length stored after the name bytes, or 0 if
// the has-tag flag bit is clear.
func (n name) tagLen() int {
	if *n.data(0)&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off+1)))
}

// name returns the name string, aliasing the encoded bytes directly
// (no copy) by constructing a string header over them.
func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}

// tag returns the tag string, aliasing the encoded bytes (no copy).
func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}

// pkgPath resolves the trailing pkgPath nameOff, or returns "" if the
// has-pkgPath flag bit is clear.
func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	// The offset may be unaligned; copy it out byte by byte.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	// With a single module every type pointer is already unique.
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

// _typePair is a key for the seen set used by typesEqual to break
// cycles in recursively defined types.
type _typePair struct {
	t1 *_type
	t2 *_type
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// mark these types as seen, and thus equivalent which prevents an infinite loop if
	// the two types are identical, but recursively defined and loaded from
	// different modules
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.kind & kindMask
	if kind != v.kind&kindMask {
		return false
	}
	if t.string() != v.string() {
		return false
	}
	// Named types must come from the same package.
	ut := t.uncommon()
	uv := v.uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := t.nameOff(ut.pkgpath).name()
		pkgpathv := v.nameOff(uv.pkgpath).name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	// Scalar kinds carry no further structure; name+kind match suffices.
	if kindBool <= kind && kind <= kindComplex128 {
		return true
	}
	switch kind {
	case kindString, kindUnsafePointer:
		return true
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.elem, av.elem, seen) && at.len == av.len
	case kindChan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
	case kindFunc:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
			return false
		}
		tin, vin := ft.in(), fv.in()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.out(), fv.out()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.pkgpath.name() != iv.pkgpath.name() {
			return false
		}
		if len(it.mhdr) != len(iv.mhdr) {
			return false
		}
		for i := range it.mhdr {
			tm := &it.mhdr[i]
			vm := &iv.mhdr[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
			if tname.name() != vname.name() {
				return false
			}
			if tname.pkgPath() != vname.pkgPath() {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case kindMap:
		mt := (*maptype)(unsafe.Pointer(t))
		mv := (*maptype)(unsafe.Pointer(v))
		return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
	case kindPtr:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.elem, pv.elem, seen)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.elem, sv.elem, seen)
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.fields) != len(sv.fields) {
			return false
		}
		if st.pkgPath.name() != sv.pkgPath.name() {
			return false
		}
		// Fields must match in name, type, tag, and offset/embedded bit.
		for i := range st.fields {
			tf := &st.fields[i]
			vf := &sv.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !typesEqual(tf.typ, vf.typ, seen) {
				return false
			}
			if tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}