github.com/comwrg/go/src@v0.0.0-20220319063731-c238d0440370/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"github.com/comwrg/go/src/internal/abi"
	"github.com/comwrg/go/src/internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
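
	// Illustrative worked example (not part of the build; the real value
	// of _FixedStack0 depends on _StackSystem for the target OS): if
	// _FixedStack0 were 2560, the shift-or cascade above smears the top
	// set bit into every lower bit and the final +1 rounds up to the
	// next power of two:
	//
	//	_FixedStack1 = 2559 // 0b1001_1111_1111
	//	_FixedStack2 = 3583 // 0b1101_1111_1111
	//	_FixedStack3 = 4095 // 0b1111_1111_1111 (saturated; >>4, >>8, >>16 change nothing)
	//	_FixedStack  = 4096 // 2^12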

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for one _StackSmall frame plus
	// a _StackLimit chain of NOSPLIT calls plus _StackSystem
	// bytes for the OS.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//	order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

//go:notinheap
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
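
// Illustrative note (assumed values, not load-bearing): on a typical
// 64-bit Linux configuration _FixedStack is 2048 and there are four
// stack orders, so the fixed-size pools cover
//
//	order 0: 2 KiB
//	order 1: 4 KiB
//	order 2: 8 KiB
//	order 3: 16 KiB
//
// and the order of a stack of a given size is stacklog2(size/_FixedStack),
// e.g. stacklog2(8192/2048) = 2.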

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
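
// Illustrative arithmetic for the refill/release hysteresis below
// (assuming _StackCacheSize = 32 KiB, its value in malloc.go): a refill
// for order 0 (2 KiB stacks) pulls stacks from the global pool until the
// per-P cache holds _StackCacheSize/2 = 16 KiB, i.e. 8 stacks, and a
// release later trims the cache back down to the same 16 KiB watermark,
// so each P oscillates around half capacity instead of thrashing on the
// global lock.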

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
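
// Example of the size-class selection in stackalloc above (illustrative,
// assuming _FixedStack = 2048, _NumStackOrders = 4, _StackCacheSize = 32 KiB
// and 8 KiB pages):
//
//	stackalloc(4096)  // order 1; served from the per-P cache or stackpool
//	stackalloc(32768) // too big for the pools: npage = 4, log2npage = 2,
//	                  // served from stackLarge.free[2] or a fresh span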

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}
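
// Illustrative note on the bitmap indexing used in adjustpointers and in
// the stack-object loop of adjustframe (not runtime code): gcdata holds
// one bit per pointer-sized word, so for word index w the relevant bit is
//
//	bit := *addb(gcdata, w/8) >> (w % 8) & 1
//
// and adjustframe derives w from the byte offset i as i/sys.PtrSize,
// which is where the i/(8*sys.PtrSize) and i/sys.PtrSize&7 expressions
// above come from.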

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other locks other than hchan.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
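
// Worked example for the adjustment in copystack below (illustrative
// addresses only): copying a stack [0x1000, 0x3000) to [0x8000, 0xa000)
// gives
//
//	delta = new.hi - old.hi = 0xa000 - 0x3000 = 0x7000
//
// so a stack pointer 0x2f00 is rewritten to 0x9f00, and with sp = 0x2800
// the used portion (used = old.hi - sp = 0x800 bytes) is copied from
// [0x2800, 0x3000) to [0x9800, 0xa000).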

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}
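
	// Worked example of the sizing loop above (illustrative numbers):
	// with oldsize = 8 KiB, used = 6 KiB and a callee needing
	// max+_StackGuard = 5 KiB, the initial doubling to 16 KiB already
	// leaves newsize-used = 10 KiB >= 5 KiB, so newsize stays 16 KiB;
	// a frame needing 30 KiB would keep doubling until newsize = 64 KiB.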

	if gp.stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
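
// Illustrative numbers for the shrink policy above (assuming the default
// stack guard multiplier, so _StackLimit is roughly 800 bytes): for a
// 32 KiB stack with sp about 2 KiB below stack.hi, used ≈ 2 KiB + 800 B,
// which is below avail/4 = 8 KiB, so the stack is copied down to 16 KiB;
// with more than ~8 KiB in use the shrink is skipped.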

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
		// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
		// We don't actually use argmap in this case, but we need to fake the stack object
		// record for these frames which contain an internal/abi.RegArgs at a hard-coded offset
		// on amd64.
		objs = methodValueCallFrameObjs
	} else {
		p := funcdata(f, _FUNCDATA_StackObjects)
		if p != nil {
			n := *(*uintptr)(p)
			p = add(p, sys.PtrSize)
			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
			// Note: the noescape above is needed to keep
			// getStackMap from "leaking param content:
			// frame". That leak propagates up to getgcmask, then
			// GCMask, then verifyGCInfo, which converts the stack
			// gcinfo tests into heap gcinfo tests :(
		}
	}

	return
}

var (
	abiRegArgsEface          interface{} = abi.RegArgs{}
	abiRegArgsType           *_type      = efaceOf(&abiRegArgsEface)._type
	methodValueCallFrameObjs             = []stackObjectRecord{
		{
			off:      -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
			size:     int32(abiRegArgsType.size),
			_ptrdata: int32(abiRegArgsType.ptrdata),
			gcdata:   abiRegArgsType.gcdata,
		},
	}
)

func init() {
	if abiRegArgsType.kind&kindGCProg != 0 {
		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
	}
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off      int32
	size     int32
	_ptrdata int32 // ptrdata, or -ptrdata if a GC prog is used
	gcdata   *byte // pointer map or GC prog of the type
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}