github.com/flyinox/gosm@v0.0.0-20171117061539-16768cb62077/src/runtime/symtab.go (about) 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "runtime/internal/atomic" 9 "runtime/internal/sys" 10 "unsafe" 11 ) 12 13 // Frames may be used to get function/file/line information for a 14 // slice of PC values returned by Callers. 15 type Frames struct { 16 // callers is a slice of PCs that have not yet been expanded. 17 callers []uintptr 18 19 // stackExpander expands callers into a sequence of Frames, 20 // tracking the necessary state across PCs. 21 stackExpander stackExpander 22 } 23 24 // Frame is the information returned by Frames for each call frame. 25 type Frame struct { 26 // PC is the program counter for the location in this frame. 27 // For a frame that calls another frame, this will be the 28 // program counter of a call instruction. Because of inlining, 29 // multiple frames may have the same PC value, but different 30 // symbolic information. 31 PC uintptr 32 33 // Func is the Func value of this call frame. This may be nil 34 // for non-Go code or fully inlined functions. 35 Func *Func 36 37 // Function is the package path-qualified function name of 38 // this call frame. If non-empty, this string uniquely 39 // identifies a single function in the program. 40 // This may be the empty string if not known. 41 // If Func is not nil then Function == Func.Name(). 42 Function string 43 44 // File and Line are the file name and line number of the 45 // location in this frame. For non-leaf frames, this will be 46 // the location of a call. These may be the empty string and 47 // zero, respectively, if not known. 48 File string 49 Line int 50 51 // Entry point program counter for the function; may be zero 52 // if not known. If Func is not nil then Entry == 53 // Func.Entry(). 
54 Entry uintptr 55 } 56 57 // stackExpander expands a call stack of PCs into a sequence of 58 // Frames. It tracks state across PCs necessary to perform this 59 // expansion. 60 // 61 // This is the core of the Frames implementation, but is a separate 62 // internal API to make it possible to use within the runtime without 63 // heap-allocating the PC slice. The only difference with the public 64 // Frames API is that the caller is responsible for threading the PC 65 // slice between expansion steps in this API. If escape analysis were 66 // smarter, we may not need this (though it may have to be a lot 67 // smarter). 68 type stackExpander struct { 69 // pcExpander expands the current PC into a sequence of Frames. 70 pcExpander pcExpander 71 72 // If previous caller in iteration was a panic, then the next 73 // PC in the call stack is the address of the faulting 74 // instruction instead of the return address of the call. 75 wasPanic bool 76 77 // skip > 0 indicates that skip frames in the expansion of the 78 // first PC should be skipped over and callers[1] should also 79 // be skipped. 80 skip int 81 } 82 83 // CallersFrames takes a slice of PC values returned by Callers and 84 // prepares to return function/file/line information. 85 // Do not change the slice until you are done with the Frames. 86 func CallersFrames(callers []uintptr) *Frames { 87 ci := &Frames{} 88 ci.callers = ci.stackExpander.init(callers) 89 return ci 90 } 91 92 func (se *stackExpander) init(callers []uintptr) []uintptr { 93 if len(callers) >= 1 { 94 pc := callers[0] 95 s := pc - skipPC 96 if s >= 0 && s < sizeofSkipFunction { 97 // Ignore skip frame callers[0] since this means the caller trimmed the PC slice. 98 return callers[1:] 99 } 100 } 101 if len(callers) >= 2 { 102 pc := callers[1] 103 s := pc - skipPC 104 if s > 0 && s < sizeofSkipFunction { 105 // Skip the first s inlined frames when we expand the first PC. 
106 se.skip = int(s) 107 } 108 } 109 return callers 110 } 111 112 // Next returns frame information for the next caller. 113 // If more is false, there are no more callers (the Frame value is valid). 114 func (ci *Frames) Next() (frame Frame, more bool) { 115 ci.callers, frame, more = ci.stackExpander.next(ci.callers) 116 return 117 } 118 119 func (se *stackExpander) next(callers []uintptr) (ncallers []uintptr, frame Frame, more bool) { 120 ncallers = callers 121 if !se.pcExpander.more { 122 // Expand the next PC. 123 if len(ncallers) == 0 { 124 se.wasPanic = false 125 return ncallers, Frame{}, false 126 } 127 se.pcExpander.init(ncallers[0], se.wasPanic) 128 ncallers = ncallers[1:] 129 se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.entry == sigpanicPC 130 if se.skip > 0 { 131 for ; se.skip > 0; se.skip-- { 132 se.pcExpander.next() 133 } 134 se.skip = 0 135 // Drop skipPleaseUseCallersFrames. 136 ncallers = ncallers[1:] 137 } 138 if !se.pcExpander.more { 139 // No symbolic information for this PC. 140 // However, we return at least one frame for 141 // every PC, so return an invalid frame. 142 return ncallers, Frame{}, len(ncallers) > 0 143 } 144 } 145 146 frame = se.pcExpander.next() 147 return ncallers, frame, se.pcExpander.more || len(ncallers) > 0 148 } 149 150 // A pcExpander expands a single PC into a sequence of Frames. 151 type pcExpander struct { 152 // more indicates that the next call to next will return a 153 // valid frame. 154 more bool 155 156 // pc is the pc being expanded. 157 pc uintptr 158 159 // frames is a pre-expanded set of Frames to return from the 160 // iterator. If this is set, then this is everything that will 161 // be returned from the iterator. 162 frames []Frame 163 164 // funcInfo is the funcInfo of the function containing pc. 165 funcInfo funcInfo 166 167 // inlTree is the inlining tree of the function containing pc. 
168 inlTree *[1 << 20]inlinedCall 169 170 // file and line are the file name and line number of the next 171 // frame. 172 file string 173 line int32 174 175 // inlIndex is the inlining index of the next frame, or -1 if 176 // the next frame is an outermost frame. 177 inlIndex int32 178 } 179 180 // init initializes this pcExpander to expand pc. It sets ex.more if 181 // pc expands to any Frames. 182 // 183 // A pcExpander can be reused by calling init again. 184 // 185 // If pc was a "call" to sigpanic, panicCall should be true. In this 186 // case, pc is treated as the address of a faulting instruction 187 // instead of the return address of a call. 188 func (ex *pcExpander) init(pc uintptr, panicCall bool) { 189 ex.more = false 190 191 ex.funcInfo = findfunc(pc) 192 if !ex.funcInfo.valid() { 193 if cgoSymbolizer != nil { 194 // Pre-expand cgo frames. We could do this 195 // incrementally, too, but there's no way to 196 // avoid allocation in this case anyway. 197 ex.frames = expandCgoFrames(pc) 198 ex.more = len(ex.frames) > 0 199 } 200 return 201 } 202 203 ex.more = true 204 entry := ex.funcInfo.entry 205 ex.pc = pc 206 if ex.pc > entry && !panicCall { 207 ex.pc-- 208 } 209 210 // file and line are the innermost position at pc. 211 ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false) 212 213 // Get inlining tree at pc 214 inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree) 215 if inldata != nil { 216 ex.inlTree = (*[1 << 20]inlinedCall)(inldata) 217 ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil) 218 } else { 219 ex.inlTree = nil 220 ex.inlIndex = -1 221 } 222 } 223 224 // next returns the next Frame in the expansion of pc and sets ex.more 225 // if there are more Frames to follow. 226 func (ex *pcExpander) next() Frame { 227 if !ex.more { 228 return Frame{} 229 } 230 231 if len(ex.frames) > 0 { 232 // Return pre-expended frame. 
233 frame := ex.frames[0] 234 ex.frames = ex.frames[1:] 235 ex.more = len(ex.frames) > 0 236 return frame 237 } 238 239 if ex.inlIndex >= 0 { 240 // Return inner inlined frame. 241 call := ex.inlTree[ex.inlIndex] 242 frame := Frame{ 243 PC: ex.pc, 244 Func: nil, // nil for inlined functions 245 Function: funcnameFromNameoff(ex.funcInfo, call.func_), 246 File: ex.file, 247 Line: int(ex.line), 248 Entry: ex.funcInfo.entry, 249 } 250 ex.file = funcfile(ex.funcInfo, call.file) 251 ex.line = call.line 252 ex.inlIndex = call.parent 253 return frame 254 } 255 256 // No inlining or pre-expanded frames. 257 ex.more = false 258 return Frame{ 259 PC: ex.pc, 260 Func: ex.funcInfo._Func(), 261 Function: funcname(ex.funcInfo), 262 File: ex.file, 263 Line: int(ex.line), 264 Entry: ex.funcInfo.entry, 265 } 266 } 267 268 // expandCgoFrames expands frame information for pc, known to be 269 // a non-Go function, using the cgoSymbolizer hook. expandCgoFrames 270 // returns nil if pc could not be expanded. 271 func expandCgoFrames(pc uintptr) []Frame { 272 arg := cgoSymbolizerArg{pc: pc} 273 callCgoSymbolizer(&arg) 274 275 if arg.file == nil && arg.funcName == nil { 276 // No useful information from symbolizer. 277 return nil 278 } 279 280 var frames []Frame 281 for { 282 frames = append(frames, Frame{ 283 PC: pc, 284 Func: nil, 285 Function: gostring(arg.funcName), 286 File: gostring(arg.file), 287 Line: int(arg.lineno), 288 Entry: arg.entry, 289 }) 290 if arg.more == 0 { 291 break 292 } 293 callCgoSymbolizer(&arg) 294 } 295 296 // No more frames for this PC. Tell the symbolizer we are done. 297 // We don't try to maintain a single cgoSymbolizerArg for the 298 // whole use of Frames, because there would be no good way to tell 299 // the symbolizer when we are done. 
300 arg.pc = 0 301 callCgoSymbolizer(&arg) 302 303 return frames 304 } 305 306 // NOTE: Func does not expose the actual unexported fields, because we return *Func 307 // values to users, and we want to keep them from being able to overwrite the data 308 // with (say) *f = Func{}. 309 // All code operating on a *Func must call raw() to get the *_func 310 // or funcInfo() to get the funcInfo instead. 311 312 // A Func represents a Go function in the running binary. 313 type Func struct { 314 opaque struct{} // unexported field to disallow conversions 315 } 316 317 func (f *Func) raw() *_func { 318 return (*_func)(unsafe.Pointer(f)) 319 } 320 321 func (f *Func) funcInfo() funcInfo { 322 fn := f.raw() 323 return funcInfo{fn, findmoduledatap(fn.entry)} 324 } 325 326 // PCDATA and FUNCDATA table indexes. 327 // 328 // See funcdata.h and ../cmd/internal/obj/funcdata.go. 329 const ( 330 _PCDATA_StackMapIndex = 0 331 _PCDATA_InlTreeIndex = 1 332 _FUNCDATA_ArgsPointerMaps = 0 333 _FUNCDATA_LocalsPointerMaps = 1 334 _FUNCDATA_InlTree = 2 335 _ArgsSizeUnknown = -0x80000000 336 ) 337 338 // moduledata records information about the layout of the executable 339 // image. It is written by the linker. Any changes here must be 340 // matched changes to the code in cmd/internal/ld/symtab.go:symtab. 341 // moduledata is stored in read-only memory; none of the pointers here 342 // are visible to the garbage collector. 
type moduledata struct {
	// pclntable is the runtime function/line table blob; ftab,
	// filetab, and findfunctab index into it.
	pclntable    []byte
	ftab         []functab
	filetab      []uint32
	findfunctab  uintptr
	minpc, maxpc uintptr

	// Segment boundaries of this module's image.
	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	// gcdatamask/gcbssmask are filled in lazily by modulesinit.
	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	// next links modules in load order; see firstmoduledata.
	next *moduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice unsafe.Pointer // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//go:nosplit
//go:nowritebarrier
func activeModules() []*moduledata {
	// Atomic load pairs with the atomicstorep in modulesinit, so
	// concurrent readers always see a fully built slice (or nil
	// before the first modulesinit).
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		*modules = append(*modules, md)
		// A zero gcdatamask means this module's pointer masks
		// have not been expanded yet; do it once here.
		if md.gcdatamask == (bitvector{}) {
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	mainText := funcPC(main_main)
	for i, md := range *modules {
		if md.text <= mainText && mainText <= md.etext {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	// Publish the new slice; readers use atomic.Loadp (activeModules).
	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

// functab maps a function entry PC to the offset of its _func
// record within pclntable.
type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index.
// Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

// moduledataverify validates the symbol tables of every loaded module.
func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

// moduledataverify1 sanity-checks one module's pclntab: header magic,
// PC quantum, pointer size, ftab sort order, pcvalue table coverage,
// minpc/maxpc bounds, and shared-library ABI hashes. Throws on any
// inconsistency.
func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	var pcCache pcvalueCache
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
			}
			throw("invalid runtime symbol table")
		}

		if debugPcln || nftab-i < 5 {
			// Check a PC near but not at the very end.
			// The very end might be just padding that is not covered by the tables.
			// No architecture rounds function entries to more than 16 bytes,
			// but if one came along we'd need to subtract more here.
			// But don't use the next PC if it corresponds to a foreign object chunk
			// (no pcln table, f2.pcln == 0). That chunk might have an alignment
			// more than 16 bytes.
			f := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			end := f.entry
			if i+1 < nftab {
				f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
				if f2.pcln != 0 {
					end = f2.entry - 16
					if end < f.entry {
						end = f.entry
					}
				}
			}
			pcvalue(f, f.pcfile, end, &pcCache, true)
			pcvalue(f, f.pcln, end, &pcCache, true)
			pcvalue(f, f.pcsp, end, &pcCache, true)
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the outermost function.
func FuncForPC(pc uintptr) *Func {
	return findfunc(pc)._Func()
}

// Name returns the name of the function.
func (f *Func) Name() string {
	if f == nil {
		return ""
	}
	return funcname(f.funcInfo())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	return f.raw().entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}

// findmoduledatap returns the module whose [minpc, maxpc) range
// contains pc, or nil if no loaded module does.
func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

// funcInfo bundles a raw _func with the module it belongs to, which
// is needed to resolve offsets into that module's pclntable.
type funcInfo struct {
	*_func
	datap *moduledata
}

// valid reports whether f refers to an actual function.
func (f funcInfo) valid() bool {
	return f._func != nil
}

// _Func reinterprets f's _func as the public opaque *Func.
func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}

// findfunc looks up the function containing pc using the module's
// findfunctab bucket index, then a short scan of ftab. Returns an
// invalid funcInfo if pc is not in any module.
func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize
	i := x % pcbucketsize / (pcbucketsize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if idx >= uint32(len(datap.ftab)) {
		idx = uint32(len(datap.ftab) - 1)
	}
	if pc < datap.ftab[idx].entry {

		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {

		// linear search to find func with pc >= entry.
		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
}

// pcvalueCache memoizes recent pcvalue lookups; see pcvalue.
type pcvalueCache struct {
	entries [16]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

// pcvalue decodes the pc-value table at offset off in f's module and
// returns the value in effect at targetpc, or -1 if off is zero or
// the lookup fails non-strictly. In strict mode a failure throws.
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		for i := range cache.entries {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			ent := &cache.entries[i]
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	if !f.valid() {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	datap := f.datap
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			if cache != nil {
				ci := fastrandn(uint32(len(cache.entries)))
				cache.entries[ci] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	// Dump the whole table for debugging before throwing.
	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

// cfuncname returns f's NUL-terminated name within pclntable, or nil
// if f is invalid or has no name.
func cfuncname(f funcInfo) *byte {
	if !f.valid() || f.nameoff == 0 {
		return nil
	}
	return &f.datap.pclntable[f.nameoff]
}

// funcname returns f's name as a Go string (no copy).
func funcname(f funcInfo) string {
	return gostringnocopy(cfuncname(f))
}

// funcnameFromNameoff resolves an arbitrary name offset (e.g. from an
// inlining tree entry) within f's module.
func funcnameFromNameoff(f funcInfo, nameoff int32) string {
	datap := f.datap
	if !f.valid() {
		return ""
	}
	cstr := &datap.pclntable[nameoff]
	return gostringnocopy(cstr)
}

// funcfile resolves a file-table index to a file name, or "?" if f is
// invalid.
func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
}

// funcline1 returns the file and line for targetpc within f, or
// ("?", 0) when unknown. strict is threaded through to pcvalue.
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

// funcline is the strict variant of funcline1.
func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

// funcspdelta returns the SP delta at targetpc. The result should be
// pointer-aligned; a misaligned value is reported but still returned.
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(f.pcsp), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

// pcdatavalue looks up the PCDATA value in the given table at
// targetpc, or -1 if the table index is out of range. The pcdata
// offsets are laid out immediately after the _func header.
func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
	return pcvalue(f, off, targetpc, cache, true)
}

// funcdata returns the i'th FUNCDATA pointer for f, or nil if i is
// out of range. The funcdata pointers follow the pcdata offsets and
// must be pointer-aligned on 64-bit targets.
func funcdata(f funcInfo, i int32) unsafe.Pointer {
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
			println("runtime: misaligned func", f._func)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
850 func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) { 851 // For both uvdelta and pcdelta, the common case (~70%) 852 // is that they are a single byte. If so, avoid calling readvarint. 853 uvdelta := uint32(p[0]) 854 if uvdelta == 0 && !first { 855 return nil, false 856 } 857 n := uint32(1) 858 if uvdelta&0x80 != 0 { 859 n, uvdelta = readvarint(p) 860 } 861 p = p[n:] 862 if uvdelta&1 != 0 { 863 uvdelta = ^(uvdelta >> 1) 864 } else { 865 uvdelta >>= 1 866 } 867 vdelta := int32(uvdelta) 868 pcdelta := uint32(p[0]) 869 n = 1 870 if pcdelta&0x80 != 0 { 871 n, pcdelta = readvarint(p) 872 } 873 p = p[n:] 874 *pc += uintptr(pcdelta * sys.PCQuantum) 875 *val += vdelta 876 return p, true 877 } 878 879 // readvarint reads a varint from p. 880 func readvarint(p []byte) (read uint32, val uint32) { 881 var v, shift, n uint32 882 for { 883 b := p[n] 884 n++ 885 v |= uint32(b&0x7F) << (shift & 31) 886 if b&0x80 == 0 { 887 break 888 } 889 shift += 7 890 } 891 return n, v 892 } 893 894 type stackmap struct { 895 n int32 // number of bitmaps 896 nbit int32 // number of bits in each bitmap 897 bytedata [1]byte // bitmaps, each starting on a byte boundary 898 } 899 900 //go:nowritebarrier 901 func stackmapdata(stkmap *stackmap, n int32) bitvector { 902 if n < 0 || n >= stkmap.n { 903 throw("stackmapdata: index out of range") 904 } 905 return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+7)>>3))))} 906 } 907 908 // inlinedCall is the encoding of entries in the FUNCDATA_InlTree table. 909 type inlinedCall struct { 910 parent int32 // index of parent in the inltree, or < 0 911 file int32 // fileno index into filetab 912 line int32 // line number of the call site 913 func_ int32 // offset into pclntab for name of called function 914 }