// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded.
	callers []uintptr

	// stackExpander expands callers into a sequence of Frames,
	// tracking the necessary state across PCs.
	stackExpander stackExpander

	// elideWrapper indicates that, if the next frame is an
	// autogenerated wrapper function, it should be elided from
	// the stack.
	elideWrapper bool
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr

	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string

	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known.
	File string
	Line int

	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr
}

// stackExpander expands a call stack of PCs into a sequence of
// Frames. It tracks state across PCs necessary to perform this
// expansion.
//
// This is the core of the Frames implementation, but is a separate
// internal API to make it possible to use within the runtime without
// heap-allocating the PC slice. The only difference with the public
// Frames API is that the caller is responsible for threading the PC
// slice between expansion steps in this API. If escape analysis were
// smarter, we may not need this (though it may have to be a lot
// smarter).
type stackExpander struct {
	// pcExpander expands the current PC into a sequence of Frames.
	pcExpander pcExpander

	// If previous caller in iteration was a panic, then the next
	// PC in the call stack is the address of the faulting
	// instruction instead of the return address of the call.
	wasPanic bool

	// skip > 0 indicates that skip frames in the expansion of the
	// first PC should be skipped over and callers[1] should also
	// be skipped.
	skip int
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
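//
// A minimal usage sketch (illustrative caller-side code, not part of this
// file; the buffer size of 16 is an arbitrary choice):
//
//	pc := make([]uintptr, 16)
//	n := runtime.Callers(1, pc) // skip=1 omits runtime.Callers itself
//	frames := runtime.CallersFrames(pc[:n])
//	for {
//		frame, more := frames.Next()
//		println(frame.Function, frame.File, frame.Line)
//		if !more {
//			break
//		}
//	}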
func CallersFrames(callers []uintptr) *Frames {
	ci := &Frames{}
	ci.callers = ci.stackExpander.init(callers)
	return ci
}

func (se *stackExpander) init(callers []uintptr) []uintptr {
	if len(callers) >= 1 {
		pc := callers[0]
		s := pc - skipPC
		if s >= 0 && s < sizeofSkipFunction {
			// Ignore skip frame callers[0] since this means the caller trimmed the PC slice.
			return callers[1:]
		}
	}
	if len(callers) >= 2 {
		pc := callers[1]
		s := pc - skipPC
		if s > 0 && s < sizeofSkipFunction {
			// Skip the first s inlined frames when we expand the first PC.
			se.skip = int(s)
		}
	}
	return callers
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func (ci *Frames) Next() (frame Frame, more bool) {
	ci.callers, frame, more = ci.stackExpander.next(ci.callers, ci.elideWrapper)
	ci.elideWrapper = elideWrapperCalling(frame.Function)
	return
}

func (se *stackExpander) next(callers []uintptr, elideWrapper bool) (ncallers []uintptr, frame Frame, more bool) {
	ncallers = callers
again:
	if !se.pcExpander.more {
		// Expand the next PC.
		if len(ncallers) == 0 {
			se.wasPanic = false
			return ncallers, Frame{}, false
		}
		se.pcExpander.init(ncallers[0], se.wasPanic)
		ncallers = ncallers[1:]
		se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.funcID == funcID_sigpanic
		if se.skip > 0 {
			for ; se.skip > 0; se.skip-- {
				se.pcExpander.next()
			}
			se.skip = 0
			// Drop skipPleaseUseCallersFrames.
			ncallers = ncallers[1:]
		}
		if !se.pcExpander.more {
			// No symbolic information for this PC.
			// However, we return at least one frame for
			// every PC, so return an invalid frame.
			return ncallers, Frame{}, len(ncallers) > 0
		}
	}

	frame = se.pcExpander.next()
	if elideWrapper && frame.File == "<autogenerated>" {
		// Ignore autogenerated functions such as pointer
		// method forwarding functions. These are an
		// implementation detail that doesn't reflect the
		// source code.
		goto again
	}
	return ncallers, frame, se.pcExpander.more || len(ncallers) > 0
}

// A pcExpander expands a single PC into a sequence of Frames.
type pcExpander struct {
	// more indicates that the next call to next will return a
	// valid frame.
	more bool

	// pc is the pc being expanded.
	pc uintptr

	// frames is a pre-expanded set of Frames to return from the
	// iterator. If this is set, then this is everything that will
	// be returned from the iterator.
	frames []Frame

	// funcInfo is the funcInfo of the function containing pc.
	funcInfo funcInfo

	// inlTree is the inlining tree of the function containing pc.
	inlTree *[1 << 20]inlinedCall

	// file and line are the file name and line number of the next
	// frame.
	file string
	line int32

	// inlIndex is the inlining index of the next frame, or -1 if
	// the next frame is an outermost frame.
	inlIndex int32
}

// init initializes this pcExpander to expand pc. It sets ex.more if
// pc expands to any Frames.
//
// A pcExpander can be reused by calling init again.
//
// If pc was a "call" to sigpanic, panicCall should be true. In this
// case, pc is treated as the address of a faulting instruction
// instead of the return address of a call.
func (ex *pcExpander) init(pc uintptr, panicCall bool) {
	ex.more = false

	ex.funcInfo = findfunc(pc)
	if !ex.funcInfo.valid() {
		if cgoSymbolizer != nil {
			// Pre-expand cgo frames. We could do this
			// incrementally, too, but there's no way to
			// avoid allocation in this case anyway.
			ex.frames = expandCgoFrames(pc)
			ex.more = len(ex.frames) > 0
		}
		return
	}

	ex.more = true
	entry := ex.funcInfo.entry
	ex.pc = pc
	if ex.pc > entry && !panicCall {
		ex.pc--
	}

	// file and line are the innermost position at pc.
	ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false)

	// Get inlining tree at pc
	inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree)
	if inldata != nil {
		ex.inlTree = (*[1 << 20]inlinedCall)(inldata)
		ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil)
	} else {
		ex.inlTree = nil
		ex.inlIndex = -1
	}
}

// next returns the next Frame in the expansion of pc and sets ex.more
// if there are more Frames to follow.
func (ex *pcExpander) next() Frame {
	if !ex.more {
		return Frame{}
	}

	if len(ex.frames) > 0 {
		// Return pre-expanded frame.
		frame := ex.frames[0]
		ex.frames = ex.frames[1:]
		ex.more = len(ex.frames) > 0
		return frame
	}

	if ex.inlIndex >= 0 {
		// Return inner inlined frame.
		call := ex.inlTree[ex.inlIndex]
		frame := Frame{
			PC:       ex.pc,
			Func:     nil, // nil for inlined functions
			Function: funcnameFromNameoff(ex.funcInfo, call.func_),
			File:     ex.file,
			Line:     int(ex.line),
			Entry:    ex.funcInfo.entry,
		}
		ex.file = funcfile(ex.funcInfo, call.file)
		ex.line = call.line
		ex.inlIndex = call.parent
		return frame
	}

	// No inlining or pre-expanded frames.
	ex.more = false
	return Frame{
		PC:       ex.pc,
		Func:     ex.funcInfo._Func(),
		Function: funcname(ex.funcInfo),
		File:     ex.file,
		Line:     int(ex.line),
		Entry:    ex.funcInfo.entry,
	}
}

// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
func expandCgoFrames(pc uintptr) []Frame {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var frames []Frame
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
		})
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	return frames
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
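//
// A brief usage sketch (illustrative caller-side code, not part of this
// file): obtain a PC with runtime.Caller, then look up its Func.
//
//	pc, _, _, ok := runtime.Caller(0)
//	if ok {
//		f := runtime.FuncForPC(pc)
//		println(f.Name(), f.Entry())
//	}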
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}

func (f *Func) funcInfo() funcInfo {
	fn := f.raw()
	return funcInfo{fn, findmoduledatap(fn.entry)}
}

// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and ../cmd/internal/obj/funcdata.go.
const (
	_PCDATA_StackMapIndex       = 0
	_PCDATA_InlTreeIndex        = 1
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_InlTree           = 2
	_ArgsSizeUnknown            = -0x80000000
)

// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint32

const (
	funcID_normal funcID = iota // not a special function
	funcID_goexit
	funcID_jmpdefer
	funcID_mcall
	funcID_morestack
	funcID_mstart
	funcID_rt0_go
	funcID_asmcgocall
	funcID_sigpanic
	funcID_runfinq
	funcID_bgsweep
	funcID_forcegchelper
	funcID_timerproc
	funcID_gcBgMarkWorker
	funcID_systemstack_switch
	funcID_systemstack
	funcID_cgocallback_gofunc
	funcID_gogo
	funcID_externalthreadhandler
)

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched by changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in statically allocated non-pointer memory;
// none of the pointers here are visible to the garbage collector.
type moduledata struct {
	pclntable    []byte
	ftab         []functab
	filetab      []uint32
	findfunctab  uintptr
	minpc, maxpc uintptr

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	hasmain uint8 // 1 if module contains the main function, 0 otherwise

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	bad bool // module failed to load and should be ignored

	next *moduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice *[]*moduledata // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//go:nosplit
//go:nowritebarrier
func activeModules() []*moduledata {
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		if md.bad {
			continue
		}
		*modules = append(*modules, md)
		if md.gcdatamask == (bitvector{}) {
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself is the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	for i, md := range *modules {
		if md.hasmain != 0 {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is a lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
			}
			throw("invalid runtime symbol table")
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the outermost function.
func FuncForPC(pc uintptr) *Func {
	return findfunc(pc)._Func()
}

// Name returns the name of the function.
func (f *Func) Name() string {
	if f == nil {
		return ""
	}
	return funcname(f.funcInfo())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	return f.raw().entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
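//
// For example, given a *Func f obtained from FuncForPC (an illustrative
// caller-side sketch; any pc inside f works, and f.Entry() is always one):
//
//	file, line := f.FileLine(f.Entry())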
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}

func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

type funcInfo struct {
	*_func
	datap *moduledata
}

func (f funcInfo) valid() bool {
	return f._func != nil
}

func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}

func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize
	i := x % pcbucketsize / (pcbucketsize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if idx >= uint32(len(datap.ftab)) {
		idx = uint32(len(datap.ftab) - 1)
	}
	if pc < datap.ftab[idx].entry {
		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {
		// linear search to find func with pc >= entry.
		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
}

type pcvalueCache struct {
	entries [16]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		for i := range cache.entries {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			ent := &cache.entries[i]
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	if !f.valid() {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	datap := f.datap
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			if cache != nil {
				ci := fastrandn(uint32(len(cache.entries)))
				cache.entries[ci] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

func cfuncname(f funcInfo) *byte {
	if !f.valid() || f.nameoff == 0 {
		return nil
	}
	return &f.datap.pclntable[f.nameoff]
}

func funcname(f funcInfo) string {
	return gostringnocopy(cfuncname(f))
}

func funcnameFromNameoff(f funcInfo, nameoff int32) string {
	datap := f.datap
	if !f.valid() {
		return ""
	}
	cstr := &datap.pclntable[nameoff]
	return gostringnocopy(cstr)
}

func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
}

func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
	return pcvalue(f, off, targetpc, cache, true)
}

func funcdata(f funcInfo, i int32) unsafe.Pointer {
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
			println("runtime: misaligned func", f._func)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	uvdelta := uint32(p[0])
	if uvdelta == 0 && !first {
		return nil, false
	}
	n := uint32(1)
	if uvdelta&0x80 != 0 {
		n, uvdelta = readvarint(p)
	}
	p = p[n:]
	if uvdelta&1 != 0 {
		uvdelta = ^(uvdelta >> 1)
	} else {
		uvdelta >>= 1
	}
	vdelta := int32(uvdelta)
	pcdelta := uint32(p[0])
	n = 1
	if pcdelta&0x80 != 0 {
		n, pcdelta = readvarint(p)
	}
	p = p[n:]
	*pc += uintptr(pcdelta * sys.PCQuantum)
	*val += vdelta
	return p, true
}

// readvarint reads a varint from p.
func readvarint(p []byte) (read uint32, val uint32) {
	var v, shift, n uint32
	for {
		b := p[n]
		n++
		v |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return n, v
}

type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a byte boundary
}

//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	if n < 0 || n >= stkmap.n {
		throw("stackmapdata: index out of range")
	}
	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+7)>>3))))}
}

// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
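//
// An illustrative sketch of how a pc is mapped through this table, mirroring
// pcExpander.init/next above (f, pc, and tree are hypothetical locals; tree
// would come from funcdata(f, _FUNCDATA_InlTree)):
//
//	ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
//	for ix >= 0 {
//		call := tree[ix]
//		// call.func_ names the inlined function; call.line is its call site.
//		ix = call.parent
//	}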
type inlinedCall struct {
	parent int32 // index of parent in the inltree, or < 0
	file   int32 // fileno index into filetab
	line   int32 // line number of the call site
	func_  int32 // offset into pclntab for name of called function
}