github.com/stingnevermore/go@v0.0.0-20180120041312-3810f5bfed72/src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import "unsafe"

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
type tflag uint8

const (
	tflagUncommon  tflag = 1 << 0
	tflagExtraStar tflag = 1 << 1
	tflagNamed     tflag = 1 << 2
)

// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
type _type struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32
	tflag      tflag
	align      uint8
	fieldalign uint8
	kind       uint8
	alg        *typeAlg
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff
	ptrToThis typeOff
}

func (t *_type) string() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

func hasPrefix(s, prefix string) bool {
	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}

func (t *_type) name() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	for i >= 0 {
		if s[i] == '.' {
			break
		}
		i--
	}
	return s[i+1:]
}
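// Illustrative example for string and name above (a sketch, not taken from
// this file): for a defined type such as time.Duration, str typically encodes
// "*time.Duration" with tflagExtraStar set, so string() strips the leading
// '*' and returns "time.Duration"; name() then trims everything through the
// last '.', returning "Duration". For unnamed types, tflagNamed is clear and
// name() returns "".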
// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{(*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{(*byte)(res)}
}

func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 {
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t *_type) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t), off)
}
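// Worked example for the text-section lookup in textOff below, with
// illustrative numbers: if a section has vaddr 0x200000, length 0x100000, and
// relocated baseaddr 0x10200000, then a method offset of 0x200010 falls in
// that section and resolves to 0x10200000 + 0x200010 - 0x200000 = 0x10200010.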
func (t *_type) textOff(off textOff) unsafe.Pointer {
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream, is generated as one large buffer.
	// The off (offset) for a method is its offset within this buffer.
	// If the total text size gets too large, there can be issues on
	// platforms like ppc64 if the targets of calls are too far for the
	// call instruction. To resolve the large text issue, the text is split
	// into multiple text sections to allow the linker to generate long
	// calls when necessary. When this happens, the vaddr for each text
	// section is set to its offset within the text. Each method's offset
	// is compared against the section vaddrs and sizes to determine the
	// containing section. Then the section relative offset is added to the
	// section's relocated baseaddr to compute the method address.

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) <= sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext {
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}

func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}
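// Data layout assumed by in (above) and out (below), as a sketch; funcType in
// reflect/type.go is the authoritative description: the functype header is
// followed by an uncommontype if tflagUncommon is set, then inCount
// in-parameter *_type pointers, then outCount&(1<<15-1) out-parameter *_type
// pointers. The top bit of outCount records whether the function is variadic
// (see dotdotdot).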
func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}

type nameOff int32
type typeOff int32
type textOff int32

type method struct {
	name nameOff
	mtyp typeOff
	ifn  textOff
	tfn  textOff
}

type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	_       uint16 // unused
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}

type imethod struct {
	name nameOff
	ityp typeOff
}

type interfacetype struct {
	typ     _type
	pkgpath name
	mhdr    []imethod
}

type maptype struct {
	typ           _type
	key           *_type
	elem          *_type
	bucket        *_type // internal type representing a hash bucket
	hmap          *_type // internal type representing a hmap
	keysize       uint8  // size of key slot
	indirectkey   bool   // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue bool   // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}

type arraytype struct {
	typ   _type
	elem  *_type
	slice *_type
	len   uintptr
}

type chantype struct {
	typ  _type
	elem *_type
	dir  uintptr
}

type slicetype struct {
	typ  _type
	elem *_type
}

type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16
}

type ptrtype struct {
	typ  _type
	elem *_type
}

type structfield struct {
	name       name
	typ        *_type
	offsetAnon uintptr
}

func (f *structfield) offset() uintptr {
	return f.offsetAnon >> 1
}

type structtype struct {
	typ     _type
	pkgPath name
	fields  []structfield
}
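// Sketch of the encoded name layout, as read by the accessors on name below
// (reflect/type.go has the authoritative description):
//
//	byte 0     flag bits: 1<<0 exported, 1<<1 tag follows, 1<<2 pkgPath follows
//	bytes 1-2  name length, big endian
//	...        name bytes
//	2 bytes    tag length, big endian (only if the tag bit is set)
//	...        tag bytes
//	4 bytes    nameOff of the package path (only if the pkgPath bit is set)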
// name is an encoded type name with optional extra data.
// See reflect/type.go for details.
type name struct {
	bytes *byte
}

func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}

func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

func (n name) nameLen() int {
	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
}

func (n name) tagLen() int {
	if *n.data(0)&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
}

func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}

func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}

func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

type _typePair struct {
	t1 *_type
	t2 *_type
}
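// Illustrative case for the seen set below (a sketch, not taken from this
// file): with buildmode=shared, a recursive type such as
//
//	type node struct{ next *node }
//
// can be present in two modules as two distinct *_type values. Comparing the
// copies would recurse forever, so typesEqual records each (t, v) pair in a
// map keyed by _typePair before recursing and treats a revisited pair as
// equal.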
// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen, and thus equivalent, which prevents an
	// infinite loop if the two types are identical but recursively defined
	// and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.kind & kindMask
	if kind != v.kind&kindMask {
		return false
	}
	if t.string() != v.string() {
		return false
	}
	ut := t.uncommon()
	uv := v.uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := t.nameOff(ut.pkgpath).name()
		pkgpathv := v.nameOff(uv.pkgpath).name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if kindBool <= kind && kind <= kindComplex128 {
		return true
	}
	switch kind {
	case kindString, kindUnsafePointer:
		return true
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.elem, av.elem, seen) && at.len == av.len
	case kindChan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
	case kindFunc:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
			return false
		}
		tin, vin := ft.in(), fv.in()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.out(), fv.out()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.pkgpath.name() != iv.pkgpath.name() {
			return false
		}
		if len(it.mhdr) != len(iv.mhdr) {
			return false
		}
		for i := range it.mhdr {
			tm := &it.mhdr[i]
			vm := &iv.mhdr[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
			if tname.name() != vname.name() {
				return false
			}
			if tname.pkgPath() != vname.pkgPath() {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case kindMap:
		mt := (*maptype)(unsafe.Pointer(t))
		mv := (*maptype)(unsafe.Pointer(v))
		return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
	case kindPtr:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.elem, pv.elem, seen)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.elem, sv.elem, seen)
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.fields) != len(sv.fields) {
			return false
		}
		if st.pkgPath.name() != sv.pkgPath.name() {
			return false
		}
		for i := range st.fields {
			tf := &st.fields[i]
			vf := &sv.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !typesEqual(tf.typ, vf.typ, seen) {
				return false
			}
			if tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}