// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded.
	callers []uintptr

	// stackExpander expands callers into a sequence of Frames,
	// tracking the necessary state across PCs.
	stackExpander stackExpander
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr

	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string

	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known.
	File string
	Line int

	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr
}

// stackExpander expands a call stack of PCs into a sequence of
// Frames. It tracks state across PCs necessary to perform this
// expansion.
//
// This is the core of the Frames implementation, but is a separate
// internal API to make it possible to use within the runtime without
// heap-allocating the PC slice. The only difference with the public
// Frames API is that the caller is responsible for threading the PC
// slice between expansion steps in this API. If escape analysis were
// smarter, we may not need this (though it may have to be a lot
// smarter).
type stackExpander struct {
	// pcExpander expands the current PC into a sequence of Frames.
	pcExpander pcExpander

	// If previous caller in iteration was a panic, then the next
	// PC in the call stack is the address of the faulting
	// instruction instead of the return address of the call.
	wasPanic bool

	// skip > 0 indicates that skip frames in the expansion of the
	// first PC should be skipped over and callers[1] should also
	// be skipped.
	skip int
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
func CallersFrames(callers []uintptr) *Frames {
	ci := &Frames{}
	ci.callers = ci.stackExpander.init(callers)
	return ci
}

// init strips any skip marker (a PC inside the skip function at
// skipPC) from the front of callers and records how many inlined
// frames of the first real PC must be skipped. It returns the slice
// of PCs that remain to be expanded.
func (se *stackExpander) init(callers []uintptr) []uintptr {
	if len(callers) >= 1 {
		pc := callers[0]
		s := pc - skipPC
		// NOTE(review): s is a uintptr, so "s >= 0" is always true;
		// the subtraction wraps on underflow and the effective test
		// is only "s < sizeofSkipFunction".
		if s >= 0 && s < sizeofSkipFunction {
			// Ignore skip frame callers[0] since this means the caller trimmed the PC slice.
			return callers[1:]
		}
	}
	if len(callers) >= 2 {
		pc := callers[1]
		s := pc - skipPC
		if s > 0 && s < sizeofSkipFunction {
			// Skip the first s inlined frames when we expand the first PC.
			se.skip = int(s)
		}
	}
	return callers
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func (ci *Frames) Next() (frame Frame, more bool) {
	ci.callers, frame, more = ci.stackExpander.next(ci.callers)
	return
}

// next produces the next Frame from the PC slice, returning the
// remaining PCs, the frame, and whether further frames follow.
// A PC is only consumed from callers once the current pcExpander
// is exhausted.
func (se *stackExpander) next(callers []uintptr) (ncallers []uintptr, frame Frame, more bool) {
	ncallers = callers
	if !se.pcExpander.more {
		// Expand the next PC.
		if len(ncallers) == 0 {
			se.wasPanic = false
			return ncallers, Frame{}, false
		}
		se.pcExpander.init(ncallers[0], se.wasPanic)
		ncallers = ncallers[1:]
		se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.entry == sigpanicPC
		if se.skip > 0 {
			for ; se.skip > 0; se.skip-- {
				se.pcExpander.next()
			}
			se.skip = 0
			// Drop skipPleaseUseCallersFrames.
			ncallers = ncallers[1:]
		}
		if !se.pcExpander.more {
			// No symbolic information for this PC.
			// However, we return at least one frame for
			// every PC, so return an invalid frame.
			return ncallers, Frame{}, len(ncallers) > 0
		}
	}

	frame = se.pcExpander.next()
	return ncallers, frame, se.pcExpander.more || len(ncallers) > 0
}

// A pcExpander expands a single PC into a sequence of Frames.
type pcExpander struct {
	// more indicates that the next call to next will return a
	// valid frame.
	more bool

	// pc is the pc being expanded.
	pc uintptr

	// frames is a pre-expanded set of Frames to return from the
	// iterator. If this is set, then this is everything that will
	// be returned from the iterator.
	frames []Frame

	// funcInfo is the funcInfo of the function containing pc.
	funcInfo funcInfo

	// inlTree is the inlining tree of the function containing pc.
	inlTree *[1 << 20]inlinedCall

	// file and line are the file name and line number of the next
	// frame.
	file string
	line int32

	// inlIndex is the inlining index of the next frame, or -1 if
	// the next frame is an outermost frame.
	inlIndex int32
}

// init initializes this pcExpander to expand pc. It sets ex.more if
// pc expands to any Frames.
//
// A pcExpander can be reused by calling init again.
//
// If pc was a "call" to sigpanic, panicCall should be true. In this
// case, pc is treated as the address of a faulting instruction
// instead of the return address of a call.
func (ex *pcExpander) init(pc uintptr, panicCall bool) {
	ex.more = false

	ex.funcInfo = findfunc(pc)
	if !ex.funcInfo.valid() {
		if cgoSymbolizer != nil {
			// Pre-expand cgo frames. We could do this
			// incrementally, too, but there's no way to
			// avoid allocation in this case anyway.
			ex.frames = expandCgoFrames(pc)
			ex.more = len(ex.frames) > 0
		}
		return
	}

	ex.more = true
	entry := ex.funcInfo.entry
	ex.pc = pc
	if ex.pc > entry && !panicCall {
		// Back up to the call instruction so the file/line
		// lookup attributes the frame to the call site rather
		// than the return address.
		ex.pc--
	}

	// file and line are the innermost position at pc.
	ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false)

	// Get inlining tree at pc
	inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree)
	if inldata != nil {
		ex.inlTree = (*[1 << 20]inlinedCall)(inldata)
		ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil)
	} else {
		ex.inlTree = nil
		ex.inlIndex = -1
	}
}

// next returns the next Frame in the expansion of pc and sets ex.more
// if there are more Frames to follow.
func (ex *pcExpander) next() Frame {
	if !ex.more {
		return Frame{}
	}

	if len(ex.frames) > 0 {
		// Return pre-expanded frame.
		frame := ex.frames[0]
		ex.frames = ex.frames[1:]
		ex.more = len(ex.frames) > 0
		return frame
	}

	if ex.inlIndex >= 0 {
		// Return inner inlined frame.
		call := ex.inlTree[ex.inlIndex]
		frame := Frame{
			PC:       ex.pc,
			Func:     nil, // nil for inlined functions
			Function: funcnameFromNameoff(ex.funcInfo, call.func_),
			File:     ex.file,
			Line:     int(ex.line),
			Entry:    ex.funcInfo.entry,
		}
		// Advance to the call site recorded for this inlined
		// call, walking outward toward the parent frame.
		ex.file = funcfile(ex.funcInfo, call.file)
		ex.line = call.line
		ex.inlIndex = call.parent
		return frame
	}

	// No inlining or pre-expanded frames.
	ex.more = false
	return Frame{
		PC:       ex.pc,
		Func:     ex.funcInfo._Func(),
		Function: funcname(ex.funcInfo),
		File:     ex.file,
		Line:     int(ex.line),
		Entry:    ex.funcInfo.entry,
	}
}

// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
func expandCgoFrames(pc uintptr) []Frame {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var frames []Frame
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
		})
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	return frames
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

// raw reinterprets f as the runtime's internal *_func descriptor.
func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}

// funcInfo pairs f's _func with the module that contains its entry PC.
func (f *Func) funcInfo() funcInfo {
	fn := f.raw()
	return funcInfo{fn, findmoduledatap(fn.entry)}
}

// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and ../cmd/internal/obj/funcdata.go.
const (
	_PCDATA_StackMapIndex       = 0
	_PCDATA_InlTreeIndex        = 1
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_InlTree           = 2
	_ArgsSizeUnknown            = -0x80000000
)

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in read-only memory; none of the pointers here
// are visible to the garbage collector.
type moduledata struct {
	pclntable    []byte    // pcln table data; header is verified by moduledataverify1
	ftab         []functab // entry-PC -> funcoff lookup table, sorted by entry
	filetab      []uint32  // file name offsets into pclntable
	findfunctab  uintptr   // address of the findfuncbucket array used by findfunc
	minpc, maxpc uintptr   // PC range covered by this module (see findmoduledatap)

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	next *moduledata // next module in the linked list rooted at firstmoduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice unsafe.Pointer // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// The slice is published (via atomicstorep) by modulesinit; a nil
// pointer means modulesinit has not run yet.
func activeModules() []*moduledata {
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		*modules = append(*modules, md)
		if md.gcdatamask == (bitvector{}) {
			// Assemble the GC pointer masks the first time
			// this module is seen.
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself is the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	mainText := funcPC(main_main)
	for i, md := range *modules {
		if md.text <= mainText && mainText <= md.etext {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

// functab pairs a function entry PC with the offset of its _func
// record in pclntable.
type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index.
// Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

// moduledataverify checks the symbol table of every loaded module.
func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

// moduledataverify1 sanity-checks one module's symbol table: the pcln
// header, the sort order of ftab, the pc-value tables near the end of
// the text (all of them when debugPcln is set), the minpc/maxpc bounds,
// and the ABI hashes of linked shared libraries.
func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	var pcCache pcvalueCache
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
			}
			throw("invalid runtime symbol table")
		}

		if debugPcln || nftab-i < 5 {
			// Check a PC near but not at the very end.
			// The very end might be just padding that is not covered by the tables.
			// No architecture rounds function entries to more than 16 bytes,
			// but if one came along we'd need to subtract more here.
			// But don't use the next PC if it corresponds to a foreign object chunk
			// (no pcln table, f2.pcln == 0). That chunk might have an alignment
			// more than 16 bytes.
			f := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			end := f.entry
			if i+1 < nftab {
				f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
				if f2.pcln != 0 {
					end = f2.entry - 16
					if end < f.entry {
						end = f.entry
					}
				}
			}
			// strict=true: a valid table must resolve these lookups.
			pcvalue(f, f.pcfile, end, &pcCache, true)
			pcvalue(f, f.pcln, end, &pcCache, true)
			pcvalue(f, f.pcsp, end, &pcCache, true)
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
func FuncForPC(pc uintptr) *Func {
	return findfunc(pc)._Func()
}

// Name returns the name of the function.
func (f *Func) Name() string {
	return funcname(f.funcInfo())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	return f.raw().entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}

// findmoduledatap returns the module whose [minpc, maxpc) range
// contains pc, or nil if no module does.
func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

// funcInfo is a _func together with the module that contains it.
type funcInfo struct {
	*_func
	datap *moduledata
}

// valid reports whether f refers to an actual function.
func (f funcInfo) valid() bool {
	return f._func != nil
}

// _Func converts f to the exported *Func form (nil when f is invalid).
func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}

// findfunc looks up the function containing pc: it consults the
// two-level findfunctab index to get an approximate ftab index, then
// scans ftab to locate the exact function.
func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize
	i := x % pcbucketsize / (pcbucketsize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if idx >= uint32(len(datap.ftab)) {
		idx = uint32(len(datap.ftab) - 1)
	}
	if pc < datap.ftab[idx].entry {

		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {

		// linear search to find func with pc >= entry.

		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
}

// pcvalueCache memoizes recent pcvalue lookups.
type pcvalueCache struct {
	entries [16]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

// pcvalue decodes the pc-value table starting at off in f's module
// and returns the value in effect at targetpc. It returns -1 when the
// value cannot be determined; if strict is set and the runtime is not
// already panicking, an undecodable table is fatal.
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		for _, ent := range cache.entries {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	if !f.valid() {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	datap := f.datap
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			if cache != nil {
				ci := fastrandn(uint32(len(cache.entries)))
				cache.entries[ci] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	// Re-decode the table from the start, dumping every entry for
	// debugging before crashing.
	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

// cfuncname returns a pointer to f's NUL-terminated name in the
// pclntable, or nil if f is invalid or has no name.
func cfuncname(f funcInfo) *byte {
	if !f.valid() || f.nameoff == 0 {
		return nil
	}
	return &f.datap.pclntable[f.nameoff]
}

// funcname returns f's name as a Go string ("" when unknown).
func funcname(f funcInfo) string {
	return gostringnocopy(cfuncname(f))
}

// funcnameFromNameoff is like funcname but takes an explicit name
// offset, as stored for inlined calls in the inline tree.
func funcnameFromNameoff(f funcInfo, nameoff int32) string {
	datap := f.datap
	if !f.valid() {
		return ""
	}
	cstr := &datap.pclntable[nameoff]
	return gostringnocopy(cstr)
}

// funcfile returns the name of file number fileno in f's module,
// or "?" if f is invalid.
func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
}

// funcline1 returns the file name and line number for targetpc in f,
// or "?", 0 if they cannot be resolved.
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

// funcline is the strict variant of funcline1.
func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

// funcspdelta returns the SP delta recorded for targetpc in f's pcsp
// table, printing a diagnostic if the delta is not pointer-aligned.
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

// pcdatavalue returns the value of PCDATA table `table` at targetpc,
// or -1 if f has no such table.
func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	// The pcdata offsets are laid out immediately after the nfuncdata
	// field of the _func, one int32 per table.
	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
	return pcvalue(f, off, targetpc, cache, true)
}

// funcdata returns a pointer to the i'th FUNCDATA value for f, or nil
// if f has no such entry. The funcdata pointers follow the pcdata
// offset array and are pointer-aligned.
func funcdata(f funcInfo, i int32) unsafe.Pointer {
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		// On 64-bit systems, skip the 4-byte alignment padding that
		// precedes the pointer-sized funcdata entries.
		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
			println("runtime: misaligned func", f._func)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
838 func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) { 839 p, uvdelta := readvarint(p) 840 if uvdelta == 0 && !first { 841 return nil, false 842 } 843 if uvdelta&1 != 0 { 844 uvdelta = ^(uvdelta >> 1) 845 } else { 846 uvdelta >>= 1 847 } 848 vdelta := int32(uvdelta) 849 p, pcdelta := readvarint(p) 850 *pc += uintptr(pcdelta * sys.PCQuantum) 851 *val += vdelta 852 return p, true 853 } 854 855 // readvarint reads a varint from p. 856 func readvarint(p []byte) (newp []byte, val uint32) { 857 var v, shift uint32 858 for { 859 b := p[0] 860 p = p[1:] 861 v |= (uint32(b) & 0x7F) << shift 862 if b&0x80 == 0 { 863 break 864 } 865 shift += 7 866 } 867 return p, v 868 } 869 870 type stackmap struct { 871 n int32 // number of bitmaps 872 nbit int32 // number of bits in each bitmap 873 bytedata [1]byte // bitmaps, each starting on a byte boundary 874 } 875 876 //go:nowritebarrier 877 func stackmapdata(stkmap *stackmap, n int32) bitvector { 878 if n < 0 || n >= stkmap.n { 879 throw("stackmapdata: index out of range") 880 } 881 return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+7)/8))))} 882 } 883 884 // inlinedCall is the encoding of entries in the FUNCDATA_InlTree table. 885 type inlinedCall struct { 886 parent int32 // index of parent in the inltree, or < 0 887 file int32 // fileno index into filetab 888 line int32 // line number of the call site 889 func_ int32 // offset into pclntab for name of called function 890 }