github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/runtime/symtab.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded.
	callers []uintptr

	// stackExpander expands callers into a sequence of Frames,
	// tracking the necessary state across PCs.
	stackExpander stackExpander

	// elideWrapper indicates that, if the next frame is an
	// autogenerated wrapper function, it should be elided from
	// the stack.
	elideWrapper bool
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr

	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string

	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known.
	File string
	Line int

	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr
}

// stackExpander expands a call stack of PCs into a sequence of
// Frames. It tracks state across PCs necessary to perform this
// expansion.
//
// This is the core of the Frames implementation, but is a separate
// internal API to make it possible to use within the runtime without
// heap-allocating the PC slice. The only difference with the public
// Frames API is that the caller is responsible for threading the PC
// slice between expansion steps in this API. If escape analysis were
// smarter, we may not need this (though it may have to be a lot
// smarter).
type stackExpander struct {
	// pcExpander expands the current PC into a sequence of Frames.
	pcExpander pcExpander

	// If previous caller in iteration was a panic, then the next
	// PC in the call stack is the address of the faulting
	// instruction instead of the return address of the call.
	wasPanic bool

	// skip > 0 indicates that skip frames in the expansion of the
	// first PC should be skipped over and callers[1] should also
	// be skipped.
	skip int
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
func CallersFrames(callers []uintptr) *Frames {
	ci := &Frames{}
	ci.callers = ci.stackExpander.init(callers)
	return ci
}

func (se *stackExpander) init(callers []uintptr) []uintptr {
	if len(callers) >= 1 {
		pc := callers[0]
		s := pc - skipPC
		if s >= 0 && s < sizeofSkipFunction {
			// Ignore skip frame callers[0] since this means the caller trimmed the PC slice.
			return callers[1:]
		}
	}
	if len(callers) >= 2 {
		pc := callers[1]
		s := pc - skipPC
		if s > 0 && s < sizeofSkipFunction {
			// Skip the first s inlined frames when we expand the first PC.
			se.skip = int(s)
		}
	}
	return callers
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func (ci *Frames) Next() (frame Frame, more bool) {
	ci.callers, frame, more = ci.stackExpander.next(ci.callers, ci.elideWrapper)
	ci.elideWrapper = elideWrapperCalling(frame.Function)
	return
}

func (se *stackExpander) next(callers []uintptr, elideWrapper bool) (ncallers []uintptr, frame Frame, more bool) {
	ncallers = callers
again:
	if !se.pcExpander.more {
		// Expand the next PC.
		if len(ncallers) == 0 {
			se.wasPanic = false
			return ncallers, Frame{}, false
		}
		se.pcExpander.init(ncallers[0], se.wasPanic)
		ncallers = ncallers[1:]
		se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.funcID == funcID_sigpanic
		if se.skip > 0 {
			for ; se.skip > 0; se.skip-- {
				se.pcExpander.next()
			}
			se.skip = 0
			// Drop skipPleaseUseCallersFrames.
			ncallers = ncallers[1:]
		}
		if !se.pcExpander.more {
			// No symbolic information for this PC.
			// However, we return at least one frame for
			// every PC, so return an invalid frame.
			return ncallers, Frame{}, len(ncallers) > 0
		}
	}

	frame = se.pcExpander.next()
	if elideWrapper && frame.File == "<autogenerated>" {
		// Ignore autogenerated functions such as pointer
		// method forwarding functions. These are an
		// implementation detail that doesn't reflect the
		// source code.
		goto again
	}
	return ncallers, frame, se.pcExpander.more || len(ncallers) > 0
}
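
// The function below is an illustrative sketch and is not part of the
// original runtime source. It shows the intended use of the public API
// above: fill a PC buffer with Callers, wrap it with CallersFrames, and
// drain it with Next until more reports false. The name framesUsageSketch
// is hypothetical.
func framesUsageSketch() {
	pcs := make([]uintptr, 16)
	n := Callers(0, pcs) // skip=0 also records the frame for Callers itself
	frames := CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		// Frame.Func is nil for inlined and non-Go frames; Function,
		// File, and Line are still filled in when known.
		print(frame.Function, " ", frame.File, ":", frame.Line, "\n")
		if !more {
			break
		}
	}
}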

// A pcExpander expands a single PC into a sequence of Frames.
type pcExpander struct {
	// more indicates that the next call to next will return a
	// valid frame.
	more bool

	// pc is the pc being expanded.
	pc uintptr

	// frames is a pre-expanded set of Frames to return from the
	// iterator. If this is set, then this is everything that will
	// be returned from the iterator.
	frames []Frame

	// funcInfo is the funcInfo of the function containing pc.
	funcInfo funcInfo

	// inlTree is the inlining tree of the function containing pc.
	inlTree *[1 << 20]inlinedCall

	// file and line are the file name and line number of the next
	// frame.
	file string
	line int32

	// inlIndex is the inlining index of the next frame, or -1 if
	// the next frame is an outermost frame.
	inlIndex int32
}

// init initializes this pcExpander to expand pc. It sets ex.more if
// pc expands to any Frames.
//
// A pcExpander can be reused by calling init again.
//
// If pc was a "call" to sigpanic, panicCall should be true. In this
// case, pc is treated as the address of a faulting instruction
// instead of the return address of a call.
func (ex *pcExpander) init(pc uintptr, panicCall bool) {
	ex.more = false

	ex.funcInfo = findfunc(pc)
	if !ex.funcInfo.valid() {
		if cgoSymbolizer != nil {
			// Pre-expand cgo frames. We could do this
			// incrementally, too, but there's no way to
			// avoid allocation in this case anyway.
			ex.frames = expandCgoFrames(pc)
			ex.more = len(ex.frames) > 0
		}
		return
	}

	ex.more = true
	entry := ex.funcInfo.entry
	ex.pc = pc
	if ex.pc > entry && !panicCall {
		ex.pc--
	}

	// file and line are the innermost position at pc.
	ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false)

	// Get inlining tree at pc
	inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree)
	if inldata != nil {
		ex.inlTree = (*[1 << 20]inlinedCall)(inldata)
		ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil)
	} else {
		ex.inlTree = nil
		ex.inlIndex = -1
	}
}

// next returns the next Frame in the expansion of pc and sets ex.more
// if there are more Frames to follow.
func (ex *pcExpander) next() Frame {
	if !ex.more {
		return Frame{}
	}

	if len(ex.frames) > 0 {
		// Return pre-expanded frame.
		frame := ex.frames[0]
		ex.frames = ex.frames[1:]
		ex.more = len(ex.frames) > 0
		return frame
	}

	if ex.inlIndex >= 0 {
		// Return inner inlined frame.
		call := ex.inlTree[ex.inlIndex]
		frame := Frame{
			PC:       ex.pc,
			Func:     nil, // nil for inlined functions
			Function: funcnameFromNameoff(ex.funcInfo, call.func_),
			File:     ex.file,
			Line:     int(ex.line),
			Entry:    ex.funcInfo.entry,
		}
		ex.file = funcfile(ex.funcInfo, call.file)
		ex.line = call.line
		ex.inlIndex = call.parent
		return frame
	}

	// No inlining or pre-expanded frames.
	ex.more = false
	return Frame{
		PC:       ex.pc,
		Func:     ex.funcInfo._Func(),
		Function: funcname(ex.funcInfo),
		File:     ex.file,
		Line:     int(ex.line),
		Entry:    ex.funcInfo.entry,
	}
}

// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
func expandCgoFrames(pc uintptr) []Frame {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var frames []Frame
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
		})
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	return frames
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}

func (f *Func) funcInfo() funcInfo {
	fn := f.raw()
	return funcInfo{fn, findmoduledatap(fn.entry)}
}

// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and ../cmd/internal/objabi/funcdata.go.
const (
	_PCDATA_StackMapIndex       = 0
	_PCDATA_InlTreeIndex        = 1
	_PCDATA_RegMapIndex         = 2
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_InlTree           = 2
	_FUNCDATA_RegPointerMaps    = 3
	_FUNCDATA_StackObjects      = 4
	_ArgsSizeUnknown            = -0x80000000
)

// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint8

const (
	funcID_normal funcID = iota // not a special function
	funcID_runtime_main
	funcID_goexit
	funcID_jmpdefer
	funcID_mcall
	funcID_morestack
	funcID_mstart
	funcID_rt0_go
	funcID_asmcgocall
	funcID_sigpanic
	funcID_runfinq
	funcID_gcBgMarkWorker
	funcID_systemstack_switch
	funcID_systemstack
	funcID_cgocallback_gofunc
	funcID_gogo
	funcID_externalthreadhandler
	funcID_debugCallV1
)

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched by changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in statically allocated non-pointer memory;
// none of the pointers here are visible to the garbage collector.
type moduledata struct {
	pclntable    []byte
	ftab         []functab
	filetab      []uint32
	findfunctab  uintptr
	minpc, maxpc uintptr

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	hasmain uint8 // 1 if module contains the main function, 0 otherwise

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	bad bool // module failed to load and should be ignored

	next *moduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice *[]*moduledata // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//go:nosplit
//go:nowritebarrier
func activeModules() []*moduledata {
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}
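
// moduleForPCSketch is a hypothetical helper, not part of the original file,
// included only to illustrate how the active-modules slice is consumed: it
// returns the module whose [minpc, maxpc) text range contains pc, or nil.
// findmoduledatap further below performs the equivalent walk directly over
// the linked list rooted at firstmoduledata.
func moduleForPCSketch(pc uintptr) *moduledata {
	for _, md := range activeModules() {
		if md.minpc <= pc && pc < md.maxpc {
			return md
		}
	}
	return nil
}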

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		if md.bad {
			continue
		}
		*modules = append(*modules, md)
		if md.gcdatamask == (bitvector{}) {
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself is the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	for i, md := range *modules {
		if md.hasmain != 0 {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is a lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
			}
			throw("invalid runtime symbol table")
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}
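
// findfuncIndexSketch is a hypothetical restatement, not part of the original
// file, of the bucket/subbucket lookup described in the findfuncbucket comment
// above. It computes only the starting ftab index; findfunc below does the
// same arithmetic and then searches ftab for the entry that actually contains
// pc.
func findfuncIndexSketch(datap *moduledata, pc uintptr) uint32 {
	const nsub = uintptr(len(findfuncbucket{}.subbuckets)) // 16 subbuckets per bucket

	x := pc - datap.minpc                         // byte offset of pc within the module's text
	b := x / pcbucketsize                         // which 4096-byte bucket
	i := x % pcbucketsize / (pcbucketsize / nsub) // which 256-byte subbucket within it

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	return ffb.idx + uint32(ffb.subbuckets[i]) // starting index into datap.ftab
}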

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the outermost function.
func FuncForPC(pc uintptr) *Func {
	return findfunc(pc)._Func()
}

// Name returns the name of the function.
func (f *Func) Name() string {
	if f == nil {
		return ""
	}
	return funcname(f.funcInfo())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	return f.raw().entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}

func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

type funcInfo struct {
	*_func
	datap *moduledata
}

func (f funcInfo) valid() bool {
	return f._func != nil
}

func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}

func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize
	i := x % pcbucketsize / (pcbucketsize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if idx >= uint32(len(datap.ftab)) {
		idx = uint32(len(datap.ftab) - 1)
	}
	if pc < datap.ftab[idx].entry {
		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {
		// linear search to find func with pc >= entry.
		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
}
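
// symbolizePCSketch is an illustrative sketch, not part of the original file,
// tying together FuncForPC, Name, Entry, and FileLine defined above. When
// symbolizing a return address taken from a call stack, callers conventionally
// look up pc-1 so the result describes the call site rather than the
// instruction after it, just as pcExpander.init does; CallersFrames handles
// this (and inlining) automatically and should normally be preferred.
func symbolizePCSketch(retpc uintptr) {
	pc := retpc - 1 // assumes a non-leaf, non-sigpanic frame
	f := FuncForPC(pc)
	if f == nil {
		print("unknown pc ", hex(pc), "\n")
		return
	}
	file, line := f.FileLine(pc)
	print(f.Name(), " ", file, ":", line, " entry=", hex(f.Entry()), "\n")
}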

type pcvalueCache struct {
	entries [16]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		for i := range cache.entries {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			ent := &cache.entries[i]
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	if !f.valid() {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	datap := f.datap
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			if cache != nil {
				ci := fastrandn(uint32(len(cache.entries)))
				cache.entries[ci] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

func cfuncname(f funcInfo) *byte {
	if !f.valid() || f.nameoff == 0 {
		return nil
	}
	return &f.datap.pclntable[f.nameoff]
}

func funcname(f funcInfo) string {
	return gostringnocopy(cfuncname(f))
}

func funcnameFromNameoff(f funcInfo, nameoff int32) string {
	datap := f.datap
	if !f.valid() {
		return ""
	}
	cstr := &datap.pclntable[nameoff]
	return gostringnocopy(cstr)
}

func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
}

func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
	return pcvalue(f, off, targetpc, cache, true)
}

func funcdata(f funcInfo, i uint8) unsafe.Pointer {
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
			println("runtime: misaligned func", f._func)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	uvdelta := uint32(p[0])
	if uvdelta == 0 && !first {
		return nil, false
	}
	n := uint32(1)
	if uvdelta&0x80 != 0 {
		n, uvdelta = readvarint(p)
	}
	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
	p = p[n:]

	pcdelta := uint32(p[0])
	n = 1
	if pcdelta&0x80 != 0 {
		n, pcdelta = readvarint(p)
	}
	p = p[n:]
	*pc += uintptr(pcdelta * sys.PCQuantum)
	return p, true
}

// readvarint reads a varint from p.
func readvarint(p []byte) (read uint32, val uint32) {
	var v, shift, n uint32
	for {
		b := p[n]
		n++
		v |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return n, v
}
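
// The two helpers below are illustrative sketches, not part of the original
// file. step above combines two primitives: a little-endian base-128 varint
// (readvarint) and a zig-zag mapping that keeps small negative value deltas
// small when encoded. zigzagDecodeSketch is the same expression step uses,
// so 0, 1, 2, 3, 4 decode to 0, -1, 1, -2, 2; zigzagEncodeSketch is the
// usual inverse mapping, shown here only for reference.
func zigzagDecodeSketch(u uint32) int32 {
	return int32(-(u & 1) ^ (u >> 1))
}

func zigzagEncodeSketch(v int32) uint32 {
	return uint32((v << 1) ^ (v >> 31))
}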

type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a byte boundary
}

//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	// Check this invariant only when stackDebug is on at all.
	// The invariant is already checked by many of stackmapdata's callers,
	// and disabling it by default allows stackmapdata to be inlined.
	if stackDebug > 0 && (n < 0 || n >= stkmap.n) {
		throw("stackmapdata: index out of range")
	}
	return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))}
}

// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
type inlinedCall struct {
	parent int32 // index of parent in the inltree, or < 0
	file   int32 // fileno index into filetab
	line   int32 // line number of the call site
	func_  int32 // offset into pclntab for name of called function
}
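
// inlineChainSketch is a hypothetical helper, not part of the original file,
// showing how inlinedCall entries are consumed: starting from the
// _PCDATA_InlTreeIndex value for pc, it follows parent links outward,
// collecting function names, which mirrors the expansion performed by
// pcExpander.init and pcExpander.next. For return addresses, callers would
// normally pass pc-1, as pcExpander.init does.
func inlineChainSketch(pc uintptr) []string {
	f := findfunc(pc)
	if !f.valid() {
		return nil
	}
	var names []string
	if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
		inlTree := (*[1 << 20]inlinedCall)(inldata)
		// The index is -1 when pc is not inside any inlined body.
		for ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil); ix >= 0; ix = inlTree[ix].parent {
			names = append(names, funcnameFromNameoff(f, inlTree[ix].func_))
		}
	}
	// The outermost, physical frame is the containing function itself.
	return append(names, funcname(f))
}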