// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded to frames.
	callers []uintptr

	// frames is a slice of Frames that have yet to be returned.
	frames []Frame
	// frameStore is inline backing storage for frames, so the common
	// case of at most two pending frames needs no heap allocation.
	frameStore [2]Frame
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr

	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string

	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known.
	File string
	Line int

	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
func CallersFrames(callers []uintptr) *Frames {
	f := &Frames{callers: callers}
	f.frames = f.frameStore[:0]
	return f
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func (ci *Frames) Next() (frame Frame, more bool) {
	for len(ci.frames) < 2 {
		// Find the next frame.
		// We need to look for 2 frames so we know what
		// to return for the "more" result.
		if len(ci.callers) == 0 {
			break
		}
		pc := ci.callers[0]
		ci.callers = ci.callers[1:]
		funcInfo := findfunc(pc)
		if !funcInfo.valid() {
			// Not a Go function; possibly cgo/C code.
			if cgoSymbolizer != nil {
				// Pre-expand cgo frames. We could do this
				// incrementally, too, but there's no way to
				// avoid allocation in this case anyway.
				ci.frames = append(ci.frames, expandCgoFrames(pc)...)
			}
			continue
		}
		f := funcInfo._Func()
		entry := f.Entry()
		if pc > entry {
			// We store the pc of the start of the instruction following
			// the instruction in question (the call or the inline mark).
			// This is done for historical reasons, and to make FuncForPC
			// work correctly for entries in the result of runtime.Callers.
			pc--
		}
		name := funcname(funcInfo)
		file, line := funcline1(funcInfo, pc, false)
		if inldata := funcdata(funcInfo, _FUNCDATA_InlTree); inldata != nil {
			// The function has an inlining tree; check whether pc
			// is actually inside an inlined call and, if so, report
			// the inlined function's name with a nil *Func.
			inltree := (*[1 << 20]inlinedCall)(inldata)
			ix := pcdatavalue(funcInfo, _PCDATA_InlTreeIndex, pc, nil)
			if ix >= 0 {
				// Note: entry is not modified. It always refers to a real frame, not an inlined one.
				f = nil
				name = funcnameFromNameoff(funcInfo, inltree[ix].func_)
				// File/line is already correct.
				// TODO: remove file/line from InlinedCall?
			}
		}
		ci.frames = append(ci.frames, Frame{
			PC:       pc,
			Func:     f,
			Function: name,
			File:     file,
			Line:     int(line),
			Entry:    entry,
		})
	}

	// Pop one frame from the frame list. Keep the rest.
	// Avoid allocation in the common case, which is 1 or 2 frames.
	switch len(ci.frames) {
	case 0: // In the rare case when there are no frames at all, we return Frame{}.
	case 1:
		frame = ci.frames[0]
		ci.frames = ci.frameStore[:0]
	case 2:
		frame = ci.frames[0]
		ci.frameStore[0] = ci.frames[1]
		ci.frames = ci.frameStore[:1]
	default:
		frame = ci.frames[0]
		ci.frames = ci.frames[1:]
	}
	more = len(ci.frames) > 0
	return
}

// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
func expandCgoFrames(pc uintptr) []Frame {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var frames []Frame
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
		})
		// arg.more != 0 means the symbolizer has further (e.g.
		// inlined) frames for this same pc; keep iterating.
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	return frames
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

// raw reinterprets f as the runtime-internal *_func it actually
// points at. (For inlined frames the pointer is really a *funcinl;
// callers such as Name/Entry/FileLine check fn.entry == 0 for that.)
func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}

// funcInfo pairs the raw *_func with the moduledata that contains it,
// located by the function's entry PC.
func (f *Func) funcInfo() funcInfo {
	fn := f.raw()
	return funcInfo{fn, findmoduledatap(fn.entry)}
}

// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and ../cmd/internal/objabi/funcdata.go.
const (
	_PCDATA_StackMapIndex       = 0
	_PCDATA_InlTreeIndex        = 1
	_PCDATA_RegMapIndex         = 2
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_InlTree           = 2
	_FUNCDATA_RegPointerMaps    = 3
	_FUNCDATA_StackObjects      = 4
	_ArgsSizeUnknown            = -0x80000000
)

// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint8

const (
	funcID_normal funcID = iota // not a special function
	funcID_runtime_main
	funcID_goexit
	funcID_jmpdefer
	funcID_mcall
	funcID_morestack
	funcID_mstart
	funcID_rt0_go
	funcID_asmcgocall
	funcID_sigpanic
	funcID_runfinq
	funcID_gcBgMarkWorker
	funcID_systemstack_switch
	funcID_systemstack
	funcID_cgocallback_gofunc
	funcID_gogo
	funcID_externalthreadhandler
	funcID_debugCallV1
	funcID_gopanic
	funcID_panicwrap
	funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
)

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in statically allocated non-pointer memory;
// none of the pointers here are visible to the garbage collector.
type moduledata struct {
	pclntable    []byte
	ftab         []functab
	filetab      []uint32
	findfunctab  uintptr
	minpc, maxpc uintptr

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	hasmain uint8 // 1 if module contains the main function, 0 otherwise

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	bad bool // module failed to load and should be ignored

	next *moduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice *[]*moduledata // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//go:nosplit
//go:nowritebarrier
func activeModules() []*moduledata {
	// Atomic load pairs with the atomicstorep in modulesinit, so
	// readers always see a fully built slice.
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		if md.bad {
			// Module failed to load; leave it out of the active list.
			continue
		}
		*modules = append(*modules, md)
		if md.gcdatamask == (bitvector{}) {
			// First activation of this module: build the GC pointer
			// masks for its data and bss sections from the linker's
			// GC programs.
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself is the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	for i, md := range *modules {
		if md.hasmain != 0 {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	// Publish the new slice; pairs with atomic.Loadp in activeModules.
	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}

// functab is one entry of moduledata.ftab: a function entry PC and the
// offset of its _func record within pclntable.
type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index.
// Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

// moduledataverify checks the function symbol table of every loaded module.
func moduledataverify() {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		moduledataverify1(datap)
	}
}

const debugPcln = false

// moduledataverify1 validates one module's pclntab header, the sort
// order of its ftab, its minpc/maxpc bounds, and its ABI hashes,
// throwing on any inconsistency.
func moduledataverify1(datap *moduledata) {
	// See golang.org/s/go12symtab for header: 0xfffffffb,
	// two zero bytes, a byte giving the PC quantum,
	// and a byte giving the pointer width in bytes.
	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
		throw("invalid function symbol table\n")
	}

	// ftab is lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if datap.ftab[i].entry > datap.ftab[i+1].entry {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
			for j := 0; j <= i; j++ {
				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
			}
			throw("invalid runtime symbol table")
		}
	}

	if datap.minpc != datap.ftab[0].entry ||
		datap.maxpc != datap.ftab[nftab].entry {
		throw("minpc or maxpc invalid")
	}

	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// a *Func describing the innermost function, but with an entry
// of the outermost function.
func FuncForPC(pc uintptr) *Func {
	f := findfunc(pc)
	if !f.valid() {
		return nil
	}
	if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
		// Note: strict=false so bad PCs (those between functions) don't crash the runtime.
		// We just report the preceding function in that situation. See issue 29735.
		// TODO: Perhaps we should report no function at all in that case.
		// The runtime currently doesn't have function end info, alas.
		if ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, pc, nil, false); ix >= 0 {
			inltree := (*[1 << 20]inlinedCall)(inldata)
			name := funcnameFromNameoff(f, inltree[ix].func_)
			file, line := funcline(f, pc)
			// Return a heap-allocated funcinl disguised as a *Func;
			// Name/Entry/FileLine detect it via entry == 0... actually
			// via fn.entry == 0 on the raw view.
			fi := &funcinl{
				entry: f.entry, // entry of the real (the outermost) function.
				name:  name,
				file:  file,
				line:  int(line),
			}
			return (*Func)(unsafe.Pointer(fi))
		}
	}
	return f._Func()
}

// Name returns the name of the function.
func (f *Func) Name() string {
	if f == nil {
		return ""
	}
	fn := f.raw()
	if fn.entry == 0 { // inlined version
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.name
	}
	return funcname(f.funcInfo())
}

// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	fn := f.raw()
	if fn.entry == 0 { // inlined version
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.entry
	}
	return fn.entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	fn := f.raw()
	if fn.entry == 0 { // inlined version
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.file, fi.line
	}
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}

// findmoduledatap returns the moduledata whose [minpc, maxpc) range
// contains pc, or nil if no module covers it.
func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}

// funcInfo is a *_func together with the moduledata it came from,
// which is needed to resolve its offsets into pclntable.
type funcInfo struct {
	*_func
	datap *moduledata
}

// valid reports whether f refers to an actual function.
func (f funcInfo) valid() bool {
	return f._func != nil
}

// _Func converts the internal funcInfo into the exported *Func view.
func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}

// findfunc looks up the function containing pc using the two-level
// findfunctab index, then a short scan of ftab.
func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	x := pc - datap.minpc
	b := x / pcbucketsize                      // which bucket (4096-byte granule)
	i := x % pcbucketsize / (pcbucketsize / nsub) // which subbucket (256-byte granule)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if idx >= uint32(len(datap.ftab)) {
		idx = uint32(len(datap.ftab) - 1)
	}
	if pc < datap.ftab[idx].entry {
		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for datap.ftab[idx].entry > pc && idx > 0 {
			idx--
		}
		if idx == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {
		// linear search to find func with pc >= entry.
		for datap.ftab[idx+1].entry <= pc {
			idx++
		}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
}

// pcvalueCache caches recent pcvalue lookups (see pcvalue).
type pcvalueCache struct {
	entries [2][8]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      int32
	// val is the value of this cached pcvalue entry.
	val int32
}

// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
// It must be very cheap to calculate.
// For now, align to sys.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
	return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}

// pcvalue decodes the pc-value table at offset off in f's pclntable
// and returns the value in effect at targetpc, or -1 if there is no
// table or targetpc is not covered (when strict is false).
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if off == 0 {
		return -1
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		x := pcvalueCacheKey(targetpc)
		for i := range cache.entries[x] {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			ent := &cache.entries[x][i]
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val
			}
		}
	}

	if !f.valid() {
		if strict && panicking == 0 {
			print("runtime: no module data for ", hex(f.entry), "\n")
			throw("no module data")
		}
		return -1
	}
	datap := f.datap
	p := datap.pclntable[off:]
	pc := f.entry
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			// Put the new element at the beginning,
			// since it is the most likely to be newly used.
			if cache != nil {
				x := pcvalueCacheKey(targetpc)
				e := &cache.entries[x]
				ci := fastrand() % uint32(len(cache.entries[x]))
				e[ci] = e[0]
				e[0] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val
		}
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || !strict {
		return -1
	}

	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	// Re-decode the table, dumping each entry, to aid debugging
	// before throwing.
	p = datap.pclntable[off:]
	pc = f.entry
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry)
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1
}

// cfuncname returns a pointer to the NUL-terminated name of f within
// pclntable, or nil if f is invalid or unnamed.
func cfuncname(f funcInfo) *byte {
	if !f.valid() || f.nameoff == 0 {
		return nil
	}
	return &f.datap.pclntable[f.nameoff]
}

// funcname returns the name of f as a (non-copying) Go string.
func funcname(f funcInfo) string {
	return gostringnocopy(cfuncname(f))
}

// funcnameFromNameoff resolves an explicit name offset (e.g. from an
// inlinedCall entry) in f's module to a string.
func funcnameFromNameoff(f funcInfo, nameoff int32) string {
	datap := f.datap
	if !f.valid() {
		return ""
	}
	cstr := &datap.pclntable[nameoff]
	return gostringnocopy(cstr)
}

// funcfile returns the file name for the given file-number index in
// f's module, or "?" if f is invalid.
func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
}

// funcline1 returns the file and line for targetpc within f.
// With strict=false, bad PCs yield ("?", 0) rather than a throw.
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
	line = pcvalue(f, f.pcln, targetpc, nil, strict)
	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
	return
}

// funcline is funcline1 with strict=true.
func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}

// funcspdelta returns the SP delta (via the pcsp table) at targetpc.
// A delta that is not pointer-aligned indicates table corruption.
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x := pcvalue(f, f.pcsp, targetpc, cache, true)
	if x&(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
	}
	return x
}

// pcdatastart returns the pclntable offset of PCDATA table `table`,
// read from the variable-length area laid out after the _func header.
func pcdatastart(f funcInfo, table int32) int32 {
	return *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
}

// pcdatavalue is pcdatavalue1 with strict=true.
func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	return pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
}

// pcdatavalue1 returns the value of PCDATA table `table` at targetpc,
// or -1 if the table does not exist for f.
func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
	if table < 0 || table >= f.npcdata {
		return -1
	}
	return pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
}

// funcdata returns a pointer to FUNCDATA table i for f, or nil if f
// has no such table. The tables follow the PCDATA offsets after the
// _func header and are pointer-aligned on 64-bit.
func funcdata(f funcInfo, i uint8) unsafe.Pointer {
	// NOTE(review): `i < 0` is always false since i is uint8; the
	// i >= f.nfuncdata test is the one doing the work here.
	if i < 0 || i >= f.nfuncdata {
		return nil
	}
	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
		// Skip alignment padding before the 8-byte FUNCDATA
		// pointers; the _func itself should be 8-byte aligned.
		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
			println("runtime: misaligned func", f._func)
		}
		p = add(p, 4)
	}
	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	uvdelta := uint32(p[0])
	if uvdelta == 0 && !first {
		// A zero value delta terminates the table (except as the
		// very first entry, where zero is a legitimate delta).
		return nil, false
	}
	n := uint32(1)
	if uvdelta&0x80 != 0 {
		n, uvdelta = readvarint(p)
	}
	// Value deltas are zig-zag encoded: sign in the low bit.
	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
	p = p[n:]

	pcdelta := uint32(p[0])
	n = 1
	if pcdelta&0x80 != 0 {
		n, pcdelta = readvarint(p)
	}
	p = p[n:]
	*pc += uintptr(pcdelta * sys.PCQuantum)
	return p, true
}

// readvarint reads a varint from p.
func readvarint(p []byte) (read uint32, val uint32) {
	var v, shift, n uint32
	for {
		b := p[n]
		n++
		v |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return n, v
}

// stackmap is the in-memory layout of a compiler-emitted stack map:
// n bitmaps of nbit bits each, packed starting at bytedata.
type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a byte boundary
}

//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	// Check this invariant only when stackDebug is on at all.
	// The invariant is already checked by many of stackmapdata's callers,
	// and disabling it by default allows stackmapdata to be inlined.
	if stackDebug > 0 && (n < 0 || n >= stkmap.n) {
		throw("stackmapdata: index out of range")
	}
	return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))}
}

// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
type inlinedCall struct {
	parent   int16  // index of parent in the inltree, or < 0
	funcID   funcID // type of the called function
	_        byte   // unused / alignment padding
	file     int32  // fileno index into filetab
	line     int32  // line number of the call site
	func_    int32  // offset into pclntab for name of called function
	parentPc int32  // position of an instruction whose source position is the call site (offset from entry)
}