github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//	order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
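		// The span obtained below covers _StackCacheSize bytes; the
		// loop that follows carves it into _StackCacheSize/(_FixedStack<<order)
		// stacks of _FixedStack<<order bytes each and threads them onto
		// s.manualFreeList.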
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
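	// The loop below pulls stacks from stackpool until the per-P cache
	// holds at least _StackCacheSize/2 bytes, so after a refill the cache
	// can absorb roughly another _StackCacheSize/2 bytes of frees before
	// stackfree has to call stackcacherelease.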
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
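		// stackLarge.free is indexed by log2 of the span's page count,
		// so a stack of npage pages is looked up (and later freed back)
		// at stackLarge.free[stacklog2(npage)].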
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
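			// Spans parked here are released to the heap by
			// freeStackSpans once the GC cycle ends.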
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// | caller's BP (*)  | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
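	// The loop below walks the bitmap a byte (8 pointer-sized words) at a
	// time and uses Ctz8 to jump directly to the next set bit, so frames
	// with few pointers are scanned quickly.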
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
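	// A stack object's offset selects its base: negative offsets are
	// relative to frame.varp (locals), non-negative ones to frame.argp
	// (arguments and results), mirroring stackObjectRecord below.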
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
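	// The lastc bookkeeping below avoids locking (and later unlocking)
	// the same channel twice when consecutive sudogs on gp.waiting refer
	// to the same channel.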
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
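	// gentraceback walks every frame of the already-copied stack and
	// applies adjustframe to each one, rewriting any pointers that still
	// refer to the old stack's address range.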
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var stkmap *stackmap
		stackid := pcdata
		if f.funcID != funcID_debugCallV1 {
			stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		} else {
			// debugCallV1's stack map is the register map
			// at its call site.
			callerPC := frame.lr
			caller := findfunc(callerPC)
			if !caller.valid() {
				println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
				throw("bad debugCallV1")
			}
			stackid = int32(-1)
			if callerPC != caller.entry {
				callerPC--
				stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
			}
			if stackid == -1 {
				stackid = 0 // in prologue
			}
			stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
		}
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print(" no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
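	// The _FUNCDATA_StackObjects funcdata is a count followed by that
	// many stackObjectRecord entries; the slice header built below
	// aliases those entries in place without copying them.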
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame". That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off int
	typ *_type
}

//go:nosplit
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}
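// A minimal sketch (not part of this file) of how the machinery above surfaces
// to ordinary Go code: deep recursion outgrows the goroutine's initial stack
// (_StackMin bytes), each overflow trips the morestack check described at the
// top of this file, and newstack/copystack doubles the stack until the
// recursion fits.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	// grow recurses with a deliberately large frame so that the stack
//	// has to be grown (and therefore copied) several times.
//	func grow(n int) int {
//		var pad [128]byte
//		if n == 0 {
//			return int(pad[0])
//		}
//		return grow(n-1) + int(pad[0])
//	}
//
//	func main() {
//		grow(1 << 15)
//		// The traceback below is taken against the current (possibly
//		// relocated) stack; its pointers were fixed up by adjustframe.
//		buf := make([]byte, 1024)
//		fmt.Printf("%s\n", buf[:runtime.Stack(buf, false)])
//	}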