github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
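	// As a worked example of the rounding above: on linux/amd64
	// _StackSystem is 0, so _FixedStack0 is already the power of two
	// 2048 and _FixedStack stays 2048; on windows/amd64 _StackSystem is
	// 512*8 = 4096, so _FixedStack0 is 6144 and the bit-smearing rounds
	// it up to _FixedStack = 8192.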

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

//go:notinheap
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
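		// The new span is carved below into _StackCacheSize/elemsize
		// fixed-size stacks of this order, threaded onto
		// s.manualFreeList.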
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
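	// Filling only to half of _StackCacheSize (rather than to the full
	// limit) leaves room to absorb later frees before stackcacherelease's
	// threshold is reached, which is what keeps a cache that alternates
	// allocs and frees from bouncing against the global pool.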
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
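		// stackLarge holds manually-managed spans that stackfree could
		// not return to the heap while a GC was in progress; reusing one
		// here avoids asking mheap_ for a fresh span.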
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
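			// The span sits in stackLarge until freeStackSpans returns
			// it to the heap at the end of the GC cycle; stackalloc may
			// also reuse it for another large stack in the meantime.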
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
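	// Only the region at or below adjinfo.sghi (the highest sudog.elem on
	// the stack) can be written concurrently by a channel operation, so
	// frames that lie entirely above it are adjusted with plain stores.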
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
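	// Each stack object records its offset relative to varp (negative
	// off) or argp (non-negative off); see stackObjectRecord at the end
	// of this file.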
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other locks other than hchan.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
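		// syncadjustsudogs copies the region from the bottom of the used
		// stack up to sghi itself, while holding the channel locks, and
		// returns the number of bytes it copied so the memmove below
		// only handles the remainder above sghi.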
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
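	// The scheduler requests preemption by storing stackPreempt (a value
	// larger than any real stack pointer) into stackguard0, which makes
	// the next split-stack check fail and routes the goroutine here even
	// though it has plenty of stack.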
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
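	// funcMaxSPDelta reports the deepest SP offset the current function
	// can reach, so the loop below keeps doubling until the new stack has
	// room for that frame plus the guard area.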
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		for newsize-oldsize < max+_StackGuard {
			newsize *= 2
		}
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
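	// When a locals map exists it describes the words immediately below
	// varp (see adjustframe); frames whose area below varp is no larger
	// than minsize have nothing to describe.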
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print(" no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame". That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off int
	typ *_type
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}