github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/symtab.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	callers []uintptr

	// If previous caller in iteration was a panic, then
	// ci.callers[0] is the address of the faulting instruction
	// instead of the return address of the call.
	wasPanic bool

	// Frames to return for subsequent calls to the Next method.
	// Used for non-Go frames.
	frames *[]Frame
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// Program counter for this frame; multiple frames may have
	// the same PC value.
	PC uintptr

	// Func for this frame; may be nil for non-Go code or fully
	// inlined functions.
	Func *Func

	// Function name, file name, and line number for this call frame.
	// May be the empty string or zero if not known.
	// If Func is not nil then Function == Func.Name().
	Function string
	File     string
	Line     int

	// Entry point for the function; may be zero if not known.
	// If Func is not nil then Entry == Func.Entry().
	Entry uintptr
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
func CallersFrames(callers []uintptr) *Frames {
	return &Frames{callers: callers}
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func (ci *Frames) Next() (frame Frame, more bool) {
	if ci.frames != nil {
		// We have saved up frames to return.
		f := (*ci.frames)[0]
		if len(*ci.frames) == 1 {
			ci.frames = nil
		} else {
			*ci.frames = (*ci.frames)[1:]
		}
		return f, ci.frames != nil || len(ci.callers) > 0
	}

	if len(ci.callers) == 0 {
		ci.wasPanic = false
		return Frame{}, false
	}
	pc := ci.callers[0]
	ci.callers = ci.callers[1:]
	more = len(ci.callers) > 0
	f := FuncForPC(pc)
	if f == nil {
		ci.wasPanic = false
		if cgoSymbolizer != nil {
			return ci.cgoNext(pc, more)
		}
		return Frame{}, more
	}

	entry := f.Entry()
	xpc := pc
	if xpc > entry && !ci.wasPanic {
		xpc--
	}
	file, line := f.FileLine(xpc)

	function := f.Name()
	ci.wasPanic = entry == sigpanicPC

	frame = Frame{
		PC:       xpc,
		Func:     f,
		Function: function,
		File:     file,
		Line:     line,
		Entry:    entry,
	}

	return frame, more
}

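// A minimal usage sketch for the Callers/CallersFrames/Next API above, as it
// would appear in client code (not part of this file; assumes the caller
// imports "runtime"):
//
//	pc := make([]uintptr, 16)
//	n := runtime.Callers(1, pc)
//	frames := runtime.CallersFrames(pc[:n])
//	for {
//		frame, more := frames.Next()
//		println(frame.Function, frame.File, frame.Line)
//		if !more {
//			break
//		}
//	}
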
// cgoNext returns frame information for pc, known to be a non-Go function,
// using the cgoSymbolizer hook.
func (ci *Frames) cgoNext(pc uintptr, more bool) (Frame, bool) {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return Frame{}, more
	}

	var frames []Frame
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
		})
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	if len(frames) == 1 {
		// Return a single frame.
		return frames[0], more
	}

	// Return the first frame we saw and store the rest to be
	// returned by later calls to Next.
	rf := frames[0]
	frames = frames[1:]
	ci.frames = new([]Frame)
	*ci.frames = frames
	return rf, true
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw to get the *_func instead.

// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}

// funcdata.h
const (
	_PCDATA_StackMapIndex       = 0
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_ArgsSizeUnknown            = -0x80000000
)

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched by changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in read-only memory; none of the pointers here
// are visible to the garbage collector.
type moduledata struct {
	pclntable    []byte
	ftab         []functab
	filetab      []uint32
	findfunctab  uintptr
	minpc, maxpc uintptr

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath   string
	modulename   string
	modulehashes []modulehash

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	next *moduledata
}

// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice unsafe.Pointer // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
func activeModules() []*moduledata {
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}

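// An illustrative sketch of how a pc can be matched against this snapshot;
// findmoduledatap later in this file performs the equivalent walk over the
// firstmoduledata linked list instead:
//
//	for _, md := range activeModules() {
//		if md.minpc <= pc && pc < md.maxpc {
//			// pc falls within this module's text range
//		}
//	}
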
// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called, which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		*modules = append(*modules, md)
		if md.gcdatamask == (bitvector{}) {
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}
	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

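// A worked example of the lookup described above (addresses are illustrative
// only): for x = pc - datap.minpc = 0x2345, the bucket is b = 0x2345/4096 = 2
// and the subbucket is i = (0x2345%4096)/256 = 3, so the scan of ftab begins
// at index findfunctab[2].idx + findfunctab[2].subbuckets[3]. findfunc below
// implements exactly this computation.
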
func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is a lookup table for functions by program counter.
	nftab := len(datap.ftab) - 1
	var pcCache pcvalueCache
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := (*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff]))
			f2 := (*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff]))
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname((*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff]))), "\n")
			}
			throw("invalid runtime symbol table")
		}

		if debugPcln || nftab-i < 5 {
			// Check a PC near but not at the very end.
			// The very end might be just padding that is not covered by the tables.
			// No architecture rounds function entries to more than 16 bytes,
			// but if one came along we'd need to subtract more here.
			// But don't use the next PC if it corresponds to a foreign object chunk
			// (no pcln table, f2.pcln == 0). That chunk might have an alignment
			// of more than 16 bytes.
			f := (*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff]))
			end := f.entry
			if i+1 < nftab {
				f2 := (*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff]))
				if f2.pcln != 0 {
					end = f2.entry - 16
					if end < f.entry {
						end = f.entry
					}
				}
			}
			pcvalue(f, f.pcfile, end, &pcCache, true)
			pcvalue(f, f.pcln, end, &pcCache, true)
			pcvalue(f, f.pcsp, end, &pcCache, true)
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
func FuncForPC(pc uintptr) *Func {
	return (*Func)(unsafe.Pointer(findfunc(pc)))
}

// Name returns the name of the function.
func (f *Func) Name() string {
	return funcname(f.raw())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	return f.raw().entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.raw(), pc, false)
	return file, int(line32)
}

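// A minimal usage sketch for the exported API above, as it would appear in
// client code (not part of this file; pc is obtained from runtime.Caller or
// runtime.Callers):
//
//	if f := runtime.FuncForPC(pc); f != nil {
//		file, line := f.FileLine(pc)
//		println(f.Name(), f.Entry(), file, line)
//	}
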
func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

func findfunc(pc uintptr) *_func {
	datap := findmoduledatap(pc)
	if datap == nil {
		return nil
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize
	i := x % pcbucketsize / (pcbucketsize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])
	if pc < datap.ftab[idx].entry {
		// If there are multiple text sections then the buckets for the secondary
		// text sections will be off because the addresses in those text sections
		// were relocated to higher addresses. Search back to find it.
		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {
		// linear search to find func with pc >= entry.
		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return (*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff]))
}

type pcvalueCache struct {
	entries [16]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

func pcvalue(f *_func, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		for _, ent := range cache.entries {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	datap := findmoduledatap(f.entry) // inefficient
	if datap == nil {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			if cache != nil {
				ci := fastrand() % uint32(len(cache.entries))
				cache.entries[ci] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

func cfuncname(f *_func) *byte {
	if f == nil || f.nameoff == 0 {
		return nil
	}
	datap := findmoduledatap(f.entry) // inefficient
	if datap == nil {
		return nil
	}
	return &datap.pclntable[f.nameoff]
}

func funcname(f *_func) string {
	return gostringnocopy(cfuncname(f))
}

func funcline1(f *_func, targetpc uintptr, strict bool) (file string, line int32) {
	datap := findmoduledatap(f.entry) // inefficient
	if datap == nil {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

func funcline(f *_func, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

func funcspdelta(f *_func, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

func pcdatavalue(f *_func, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
	return pcvalue(f, off, targetpc, cache, true)
}

func funcdata(f *_func, i int32) unsafe.Pointer {
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		if uintptr(unsafe.Pointer(f))&4 != 0 {
			println("runtime: misaligned func", f)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	p, uvdelta := readvarint(p)
	if uvdelta == 0 && !first {
		return nil, false
	}
	if uvdelta&1 != 0 {
		uvdelta = ^(uvdelta >> 1)
	} else {
		uvdelta >>= 1
	}
	vdelta := int32(uvdelta)
	p, pcdelta := readvarint(p)
	*pc += uintptr(pcdelta * sys.PCQuantum)
	*val += vdelta
	return p, true
}

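// A worked example of one step through the encoded table (values are
// illustrative): the first varint is the value delta, with the low bit
// marking a negative delta, so uvdelta = 5 decodes to vdelta = ^(5>>1) = -3,
// while uvdelta = 4 decodes to vdelta = 4>>1 = 2. The second varint is the
// pc delta, which is scaled by sys.PCQuantum before being added to pc.
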
// readvarint reads a varint from p.
func readvarint(p []byte) (newp []byte, val uint32) {
	var v, shift uint32
	for {
		b := p[0]
		p = p[1:]
		v |= (uint32(b) & 0x7F) << shift
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return p, v
}

type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a 32-bit boundary
}

//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	if n < 0 || n >= stkmap.n {
		throw("stackmapdata: index out of range")
	}
	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+7)/8))))}
}
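
// A worked example of the indexing above (sizes are illustrative): with
// stkmap.nbit = 35, each bitmap occupies (35+7)/8 = 5 bytes, so the bitmap
// at index n = 2 starts at byte offset 2*5 = 10 from bytedata.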