github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import "unsafe"

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
type tflag uint8

const (
	tflagUncommon  tflag = 1 << 0
	tflagExtraStar tflag = 1 << 1
	tflagNamed     tflag = 1 << 2
)

// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
type _type struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32
	tflag      tflag
	align      uint8
	fieldalign uint8
	kind       uint8
	alg        *typeAlg
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff
	ptrToThis typeOff
}

// string returns the type's string representation, dropping the extra
// leading '*' that is stored when tflagExtraStar is set.
func (t *_type) string() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

// uncommon returns the uncommontype data stored after t's kind-specific
// fields, or nil if t has none.
func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

func hasPrefix(s, prefix string) bool {
	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}

// name returns the unqualified name of the type (everything after the
// last '.'), or "" if the type is unnamed.
func (t *_type) name() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	for i >= 0 {
		if s[i] == '.' {
			break
		}
		i--
	}
	return s[i+1:]
}
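
// A concrete illustration of the two accessors above: for a defined type
// such as time.Duration, string returns "time.Duration" while name returns
// just "Duration"; for an unnamed type such as []int, tflagNamed is not set
// and name returns "".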

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There is a wide range of possible addresses the heap may use, which
// may not be representable as a 32-bit offset. Moreover, the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{(*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run-time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{(*byte)(res)}
}

func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}

func (t *_type) typeOff(off typeOff) *_type {
	if off == 0 {
		return nil
	}
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t *_type) textOff(off textOff) unsafe.Pointer {
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream, is generated as one large buffer.
	// The off (offset) for a method is its offset within this buffer.
	// If the total text size gets too large, there can be issues on
	// platforms like ppc64 where the targets of calls are too far away
	// for the call instruction. To resolve the large-text issue, the text
	// is split into multiple text sections to allow the linker to generate
	// long calls when necessary. When this happens, the vaddr for each
	// text section is set to its offset within the text. Each method's
	// offset is compared against the section vaddrs and sizes to determine
	// the containing section. Then the section-relative offset is added to
	// the section's relocated baseaddr to compute the method address.
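	//
	// A worked example with made-up numbers: with two text sections
	//
	//	textsectmap[0] = {vaddr: 0x0, length: 0x200000, baseaddr: md.text}
	//	textsectmap[1] = {vaddr: 0x200000, length: 0x100000, baseaddr: md.text + 0x200100}
	//
	// an off of 0x200040 lies in the second section and resolves to
	// baseaddr + (off - vaddr) = md.text + 0x200100 + 0x40, which is the
	// arithmetic performed in the loop below.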

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) <= sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext {
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}

func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}

func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}

type nameOff int32
type typeOff int32
type textOff int32

type method struct {
	name nameOff
	mtyp typeOff
	ifn  textOff
	tfn  textOff
}

type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	_       uint16 // unused
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}

type imethod struct {
	name nameOff
	ityp typeOff
}

type interfacetype struct {
	typ     _type
	pkgpath name
	mhdr    []imethod
}

type maptype struct {
	typ           _type
	key           *_type
	elem          *_type
	bucket        *_type // internal type representing a hash bucket
	hmap          *_type // internal type representing a hmap
	keysize       uint8  // size of key slot
	indirectkey   bool   // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue bool   // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}

type arraytype struct {
	typ   _type
	elem  *_type
	slice *_type
	len   uintptr
}

type chantype struct {
	typ  _type
	elem *_type
	dir  uintptr
}

type slicetype struct {
	typ  _type
	elem *_type
}
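
// As the in, out, and dotdotdot methods above assume, a functype header is
// followed in memory by the uncommontype data (when tflagUncommon is set)
// and then by the parameter types: inCount *_type pointers for the inputs,
// followed by the outputs. The high bit of outCount is the dotdotdot
// (variadic) flag, so the real result count is outCount with that bit
// masked off.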
type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16
}

type ptrtype struct {
	typ  _type
	elem *_type
}

type structfield struct {
	name   name
	typ    *_type
	offset uintptr
}

type structtype struct {
	typ     _type
	pkgPath name
	fields  []structfield
}

// name is an encoded type name with optional extra data.
//
// The accessors below assume the following layout: byte 0 holds flag bits
// (1<<0 exported, 1<<1 has tag, 1<<2 has pkgPath), bytes 1-2 hold the
// big-endian length of the name data that starts at byte 3, an optional
// 2-byte length plus tag data follows, and finally an optional 4-byte
// nameOff of the package path.
//
// See reflect/type.go for details.
type name struct {
	bytes *byte
}

func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}

func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

func (n name) nameLen() int {
	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
}

func (n name) tagLen() int {
	if *n.data(0)&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
}

func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}

func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}

func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	prev := &firstmoduledata
	md := firstmoduledata.next
	for md != nil {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.hash] {
					if typesEqual(t, candidate) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
		md = md.next
	}
}
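
// A hypothetical example of the de-duplication above: a program built with
// -buildmode=shared loads both a shared library built from the standard
// library and its own module, and both happen to contain a _type for
// map[string]int. typelinksinit detects the duplicate via typesEqual and
// points the later module's typemap entry at the earlier module's *_type,
// so pointer comparisons of types keep working.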

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect packages by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type) bool {
	if t == v {
		return true
	}
	kind := t.kind & kindMask
	if kind != v.kind&kindMask {
		return false
	}
	if t.string() != v.string() {
		return false
	}
	ut := t.uncommon()
	uv := v.uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := t.nameOff(ut.pkgpath).name()
		pkgpathv := v.nameOff(uv.pkgpath).name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if kindBool <= kind && kind <= kindComplex128 {
		return true
	}
	switch kind {
	case kindString, kindUnsafePointer:
		return true
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.elem, av.elem) && at.len == av.len
	case kindChan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem)
	case kindFunc:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
			return false
		}
		tin, vin := ft.in(), fv.in()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i]) {
				return false
			}
		}
		tout, vout := ft.out(), fv.out()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i]) {
				return false
			}
		}
		return true
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.pkgpath.name() != iv.pkgpath.name() {
			return false
		}
		if len(it.mhdr) != len(iv.mhdr) {
			return false
		}
		for i := range it.mhdr {
			tm := &it.mhdr[i]
			vm := &iv.mhdr[i]
			tname := it.typ.nameOff(tm.name)
			vname := iv.typ.nameOff(vm.name)
			if tname.name() != vname.name() {
				return false
			}
			if tname.pkgPath() != vname.pkgPath() {
				return false
			}
			if !typesEqual(it.typ.typeOff(tm.ityp), iv.typ.typeOff(vm.ityp)) {
				return false
			}
		}
		return true
	case kindMap:
		mt := (*maptype)(unsafe.Pointer(t))
		mv := (*maptype)(unsafe.Pointer(v))
		return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem)
	case kindPtr:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.elem, pv.elem)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.elem, sv.elem)
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.fields) != len(sv.fields) {
			return false
		}
		for i := range st.fields {
			tf := &st.fields[i]
			vf := &sv.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if tf.name.pkgPath() != vf.name.pkgPath() {
				return false
			}
			if !typesEqual(tf.typ, vf.typ) {
				return false
			}
			if tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offset != vf.offset {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}