// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import "unsafe"

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
type tflag uint8

const (
	// tflagUncommon means an uncommontype is stored immediately after
	// the outer, kind-specific type structure; see (*_type).uncommon.
	tflagUncommon tflag = 1 << 0
	// tflagExtraStar means the name stored at str has a spurious
	// leading "*" that must be stripped; see (*_type).string.
	tflagExtraStar tflag = 1 << 1
	// tflagNamed means the type has a name; see (*_type).name.
	tflagNamed tflag = 1 << 2
)

// _type is the runtime representation of a Go type.
//
// Needs to be in sync with ../cmd/compile/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
type _type struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32
	tflag      tflag
	align      uint8
	fieldalign uint8
	kind       uint8
	alg        *typeAlg
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff
	ptrToThis typeOff
}

// string returns the type's name as recorded at its str name offset,
// dropping the leading "*" when tflagExtraStar is set.
func (t *_type) string() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

// uncommon returns the uncommontype laid out directly after the
// kind-specific type structure, or nil if tflagUncommon is not set.
// The local wrapper struct in each case exists only to compute the
// offset of that trailing uncommontype for the given kind.
func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

// hasPrefix reports whether s begins with prefix.
func hasPrefix(s, prefix string) bool {
	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}

// name returns the unqualified type name: the portion of string()
// after the last '.', or "" if the type is unnamed (tflagNamed clear).
func (t *_type) name() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	s := t.string()
	// Scan backwards for the last '.'; if none, i ends at -1 and the
	// whole string is returned.
	i := len(s) - 1
	for i >= 0 {
		if s[i] == '.' {
			break
		}
		i--
	}
	return s[i+1:]
}

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset.
// Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we add pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

// reflectOffsLock acquires the reflectOffs lock, publishing the
// acquisition to the race detector when it is enabled.
func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

// reflectOffsUnlock releases the reflectOffs lock, notifying the race
// detector before the unlock so the release is properly ordered.
func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

// resolveNameOff resolves the name offset off relative to ptrInModule.
// If ptrInModule lies within some module's type data, off is an offset
// into that module's types section; otherwise off is treated as a
// run-time identifier and looked up in reflectOffs.m. A zero offset
// resolves to the empty name. Out-of-range offsets throw.
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{(*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{(*byte)(res)}
}

// nameOff resolves a name offset relative to t's module.
func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}

// resolveTypeOff resolves the type offset off relative to ptrInModule.
// Like resolveNameOff, a base pointer outside every module's type data
// means off is a run-time identifier looked up in reflectOffs.m.
// Within a module, the module's typemap (built by typelinksinit) takes
// precedence so duplicate types resolve to a canonical *_type;
// otherwise off indexes the module's types section directly.
// A zero offset resolves to nil. Out-of-range offsets throw.
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 {
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Base pointer is in no module: off must be a run-time
		// identifier registered in reflectOffs.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	// Prefer the canonical type recorded by typelinksinit, if any.
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

// typeOff resolves a type offset relative to t's module.
func (t *_type) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t), off)
}

// textOff resolves the text (code) offset off relative to t's module
// and returns the corresponding pointer into the instruction stream.
// If t lies in no module's type data, off is treated as a run-time
// identifier looked up in reflectOffs.m. Out-of-range offsets throw.
func (t *_type) textOff(off textOff) unsafe.Pointer {
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream is generated as one large buffer. The off (offset) for a method is
	// its offset within this buffer. If the total text size gets too large, there can be issues on platforms like ppc64 if
	// the target of calls are too far for the call instruction. To resolve the large text issue, the text is split
	// into multiple text sections to allow the linker to generate long calls when necessary. When this happens, the vaddr
	// for each text section is set to its offset within the text. Each method's offset is compared against the section
	// vaddrs and sizes to determine the containing section. Then the section relative offset is added to the section's
	// relocated baseaddr to compute the method addess.

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) <= sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext {
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}

// in returns the function type's input parameter types, stored as a
// trailing array of *_type after the functype header (and after the
// uncommontype, when tflagUncommon is set).
func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}

// out returns the function type's result types, stored after the input
// parameter types in the same trailing array. The top bit of outCount
// is the variadic flag (see dotdotdot) and is masked off here.
func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

// dotdotdot reports whether the function type is variadic, recorded in
// the top bit of outCount.
func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}

// nameOff, typeOff and textOff are 32-bit offsets into a module's name
// data, type data and text sections respectively. They are resolved by
// resolveNameOff, resolveTypeOff and (*_type).textOff.
type nameOff int32
type typeOff int32
type textOff int32

// method is the runtime layout of one method record referenced from an
// uncommontype's trailing [mcount]method array.
type method struct {
	name nameOff
	mtyp typeOff
	ifn  textOff
	tfn  textOff
}

// uncommontype holds data for types with methods or a package path;
// it is stored directly after the type structure (see (*_type).uncommon).
type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	_       uint16 // unused
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}

// imethod describes one method of an interface type.
type imethod struct {
	name nameOff
	ityp typeOff
}

// interfacetype is the runtime representation of an interface type.
type interfacetype struct {
	typ     _type
	pkgpath name
	mhdr    []imethod
}

// maptype is the runtime representation of a map type.
type maptype struct {
	typ           _type
	key           *_type
	elem          *_type
	bucket        *_type // internal type representing a hash bucket
	hmap          *_type // internal type representing a hmap
	keysize       uint8  // size of key slot
	indirectkey   bool   // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue bool   // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}

// arraytype is the runtime representation of an array type.
type arraytype struct {
	typ   _type
	elem  *_type
	slice *_type
	len   uintptr
}

// chantype is the runtime representation of a channel type.
type chantype struct {
	typ  _type
	elem *_type
	dir  uintptr
}

type slicetype struct {
	typ  _type
	elem *_type
}

// functype is the runtime representation of a function type. Parameter
// and result types follow in a trailing array; see (*functype).in and
// (*functype).out. The top bit of outCount is the variadic flag.
type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16
}

// ptrtype is the runtime representation of a pointer type.
type ptrtype struct {
	typ  _type
	elem *_type
}

// structfield describes one field of a struct type.
type structfield struct {
	name   name
	typ    *_type
	offset uintptr
}

// structtype is the runtime representation of a struct type.
type structtype struct {
	typ     _type
	pkgPath name
	fields  []structfield
}

// name is an encoded type name with optional extra data.
// See reflect/type.go for details.
//
// The layout, as read by the accessors below:
//
//	byte 0      flag bits: 1<<0 exported, 1<<1 tag data follows,
//	            1<<2 pkgPath nameOff follows
//	bytes 1-2   big-endian length of the name string
//	bytes 3...  the name string itself
//	then        (if flag 1<<1) big-endian tag length + tag string
//	then        (if flag 1<<2) a 4-byte nameOff for the package path
type name struct {
	bytes *byte
}

// data returns a pointer to the byte at offset off within the encoding.
func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}

// isExported reports whether the name is exported (flag bit 1<<0).
func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

// nameLen returns the length of the name string, decoded big-endian
// from bytes 1-2 of the encoding.
func (n name) nameLen() int {
	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
}

// tagLen returns the length of the tag string, or 0 if flag bit 1<<1
// is clear. The tag's own 2-byte length prefix sits immediately after
// the name string.
func (n name) tagLen() int {
	if *n.data(0)&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
}

// name returns the name string. The returned string is built without
// copying by pointing its header directly at the encoded bytes.
func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}

// tag returns the tag string, or "" if the name carries no tag.
// Like name, the result aliases the encoded bytes without copying.
func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}

// pkgPath returns the package path stored after the name and tag data,
// or "" if flag bit 1<<2 is clear. The stored value is a nameOff
// resolved relative to n's own bytes.
func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	// Extract the 4-byte offset from the encoding with a byte copy.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}

// typelinksinit
// scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	// With a single module there can be no cross-module duplicates.
	if firstmoduledata.next == nil {
		return
	}
	// typehash maps a type's hash to the canonical *_type values seen
	// so far in earlier modules.
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				// Candidates share a hash; typesEqual confirms a real
				// structural match before canonicalizing.
				for _, candidate := range typehash[t.hash] {
					if typesEqual(t, candidate) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit.
It uses typesEqual to map types from later modules 532 // back into earlier ones. 533 // 534 // Only typelinksinit needs this function. 535 func typesEqual(t, v *_type) bool { 536 if t == v { 537 return true 538 } 539 kind := t.kind & kindMask 540 if kind != v.kind&kindMask { 541 return false 542 } 543 if t.string() != v.string() { 544 return false 545 } 546 ut := t.uncommon() 547 uv := v.uncommon() 548 if ut != nil || uv != nil { 549 if ut == nil || uv == nil { 550 return false 551 } 552 pkgpatht := t.nameOff(ut.pkgpath).name() 553 pkgpathv := v.nameOff(uv.pkgpath).name() 554 if pkgpatht != pkgpathv { 555 return false 556 } 557 } 558 if kindBool <= kind && kind <= kindComplex128 { 559 return true 560 } 561 switch kind { 562 case kindString, kindUnsafePointer: 563 return true 564 case kindArray: 565 at := (*arraytype)(unsafe.Pointer(t)) 566 av := (*arraytype)(unsafe.Pointer(v)) 567 return typesEqual(at.elem, av.elem) && at.len == av.len 568 case kindChan: 569 ct := (*chantype)(unsafe.Pointer(t)) 570 cv := (*chantype)(unsafe.Pointer(v)) 571 return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem) 572 case kindFunc: 573 ft := (*functype)(unsafe.Pointer(t)) 574 fv := (*functype)(unsafe.Pointer(v)) 575 if ft.outCount != fv.outCount || ft.inCount != fv.inCount { 576 return false 577 } 578 tin, vin := ft.in(), fv.in() 579 for i := 0; i < len(tin); i++ { 580 if !typesEqual(tin[i], vin[i]) { 581 return false 582 } 583 } 584 tout, vout := ft.out(), fv.out() 585 for i := 0; i < len(tout); i++ { 586 if !typesEqual(tout[i], vout[i]) { 587 return false 588 } 589 } 590 return true 591 case kindInterface: 592 it := (*interfacetype)(unsafe.Pointer(t)) 593 iv := (*interfacetype)(unsafe.Pointer(v)) 594 if it.pkgpath.name() != iv.pkgpath.name() { 595 return false 596 } 597 if len(it.mhdr) != len(iv.mhdr) { 598 return false 599 } 600 for i := range it.mhdr { 601 tm := &it.mhdr[i] 602 vm := &iv.mhdr[i] 603 // Note the mhdr array can be relocated from 604 // another module. See #17724. 
605 tname := resolveNameOff(unsafe.Pointer(tm), tm.name) 606 vname := resolveNameOff(unsafe.Pointer(vm), vm.name) 607 if tname.name() != vname.name() { 608 return false 609 } 610 if tname.pkgPath() != vname.pkgPath() { 611 return false 612 } 613 tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp) 614 vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp) 615 if !typesEqual(tityp, vityp) { 616 return false 617 } 618 } 619 return true 620 case kindMap: 621 mt := (*maptype)(unsafe.Pointer(t)) 622 mv := (*maptype)(unsafe.Pointer(v)) 623 return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem) 624 case kindPtr: 625 pt := (*ptrtype)(unsafe.Pointer(t)) 626 pv := (*ptrtype)(unsafe.Pointer(v)) 627 return typesEqual(pt.elem, pv.elem) 628 case kindSlice: 629 st := (*slicetype)(unsafe.Pointer(t)) 630 sv := (*slicetype)(unsafe.Pointer(v)) 631 return typesEqual(st.elem, sv.elem) 632 case kindStruct: 633 st := (*structtype)(unsafe.Pointer(t)) 634 sv := (*structtype)(unsafe.Pointer(v)) 635 if len(st.fields) != len(sv.fields) { 636 return false 637 } 638 for i := range st.fields { 639 tf := &st.fields[i] 640 vf := &sv.fields[i] 641 if tf.name.name() != vf.name.name() { 642 return false 643 } 644 if tf.name.pkgPath() != vf.name.pkgPath() { 645 return false 646 } 647 if !typesEqual(tf.typ, vf.typ) { 648 return false 649 } 650 if tf.name.tag() != vf.name.tag() { 651 return false 652 } 653 if tf.offset != vf.offset { 654 return false 655 } 656 } 657 return true 658 default: 659 println("runtime: impossible type kind", kind) 660 throw("runtime: impossible type kind") 661 return false 662 } 663 }