github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
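
// As a rough Go model of the three check sequences above (illustrative
// only; needSplit is a hypothetical name, and the real check is the
// assembly emitted by the compiler), a function's prologue must call
// morestack when needSplit reports true:
//
//	func needSplit(sp, guard, frame uintptr) bool {
//		if frame <= _StackSmall {
//			// Tiny frames may dip up to StackSmall bytes below the guard.
//			return sp <= guard
//		}
//		if frame < _StackBig {
//			// Check the lowest address the frame will touch, less the
//			// StackSmall allowance.
//			return sp-(frame-_StackSmall) <= guard
//		}
//		// Huge frames always take the morestack path, which also guards
//		// against SP-frame wrapping below zero.
//		return true
//	}
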
const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code.
	_StackMin = 4096

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 2048*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 256

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
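
// For example (illustrative, assuming _FixedStack = 4096 and
// _NumStackOrders = 4), the ordered pools hold 4 KB, 8 KB, 16 KB, and
// 32 KB stacks, and order = log_2(size/_FixedStack):
//
//	stacklog2(8192/4096)  == 1 // an 8 KB stack lives in order 1
//	stacklog2(32768/4096) == 3 // a 32 KB stack lives in order 3
//
// Anything larger bypasses the ordered pools and comes from stackLarge
// or directly from the heap (see stackalloc below).
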
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// No free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
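	// (Filling only to half capacity here, and draining back down to
	// half in stackcacherelease, means roughly _StackCacheSize/2 bytes
	// of allocs or frees must pass through the cache before the global
	// pool lock is taken again, amortizing the locking cost.)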
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of the stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		allocsz := round(uintptr(n), _PageSize)
		v := sysAlloc(allocsz, &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		if stackDebug >= 1 {
			print(" allocated bytes ", allocsz, " lo ", v, " hi ", hex(uintptr(v)+top), "\n")
		}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		// n is a power of 2, so n == _FixedStack<<order for the
		// order computed here.
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	if stackDebug >= 1 {
		print(" allocated bytes ", n, " lo ", v, " hi ", hex(uintptr(v)+top), "\n")
	}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		// TODO(shawn): stackalloc uses the rounded size but doesn't
		// return it to the caller. To avoid leaking memory (or
		// failing to fault the whole mapping), apply the same
		// rounding here; see issue #17289.
		n = round(uintptr(n), _PageSize)
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
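			// (freeStackSpans returns these cached spans to
			// the heap at the end of GC.)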
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
//
// (sparc64)
//                 +------------------+
//                 | args from caller |
// RFP+BIAS+176 -> +------------------+ <- frame->argp
//                 |    save area     |
//                 +------------------+
//                 | caller's retaddr |
// RFP+BIAS+120 -> +------------------+
//                 |   caller's RFP   |
// RFP+BIAS+112 -> +------------------+
//                 |    save area     |  CALLER
// --- RFP+BIAS -> +------------------+ <- frame->varp -------
//                 |      locals      |  CALLEE
//                 +------------------+
//                 |  args to callee  |
// RSP+BIAS+176 -> +------------------+
//                 |    save area     |
//                 +------------------+
//                 |  return address  | (not used in epilog, used by Go)
// RSP+BIAS+120 -> +------------------+
//                 |     our RFP      | (caller's RSP)
// RSP+BIAS+112 -> +------------------+
//                 |    save area     |
//     RSP+BIAS -> +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
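
// For example (illustrative addresses): growing from an old stack
// [0xc000000000, 0xc000001000) to a new stack [0xc000420000, 0xc000422000)
// gives delta = new.hi - old.hi = 0x421000, so a slot holding
// 0xc000000f00 (0x100 bytes below the old top) is rewritten to
// 0xc000421f00 (0x100 bytes below the new top). Values outside the old
// range are left untouched.
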
// Adjustrawpointer checks whether *vpp+StackBias is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustrawpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp + sys.StackBias
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p {
		if p < adjinfo.old.hi {
			*pp = p + adjinfo.delta - sys.StackBias
			if stackDebug >= 3 {
				print(" adjust bp ", pp, ":", hex(p-sys.StackBias), " -> ", hex(*pp), "\n")
			}
		} else if stackDebug >= 3 {
			print(" >= old.hi; NOT adjusting bp ", pp, ":", hex(p-sys.StackBias), "\n")
		}
	} else if stackDebug >= 3 {
		print(" < old.lo; NOT adjusting bp ", pp, ":", hex(p-sys.StackBias), "\n")
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	abortAdjust := false
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				println("old -> hi delta ", hex(delta), "(", delta, ")")
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				abortAdjust = true
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
	if abortAdjust {
		throw("invalid stack pointer(s)")
	}
}
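
// For example, a frame whose words are [scalar, ptr, scalar, ptr] is
// described by a bitvector with n = 4 and bytedata[0] = 0xa (bit i set
// means word i is a pointer), so adjustpointers above inspects only
// words 1 and 3.
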
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a
		// goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	// SPARC64's PC holds the address of the *current* instruction.
	if targetpc != f.entry && sys.GoarchSparc64 == 0 {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}

	if sys.ArchFamily == sys.SPARC64 && size >= sys.MinFrameSize {
		// The frame pointer is always available if there's a frame on sparc64.
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustrawpointer(adjinfo, unsafe.Pointer(frame.sp+uintptr(112)))
	}

	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on the stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
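
// For example, if a concurrent channel operation may still write into
// sudog slots on the old stack, syncadjustsudogs copies [oldBot, sghi)
// while holding the channel locks, and copystack below then copies only
// the remaining [sghi, old.hi) without synchronization.
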
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	if sys.ArchFamily == sys.SPARC64 {
		adjustpointer(&adjinfo, unsafe.Pointer(&gp.sched.bp))
	}
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " bp:", hex(morebuf.bp), " lr:", hex(morebuf.lr), " stackguard=", hex(morebuf.g.ptr().stackguard0), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " bp:", hex(gp.sched.bp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.bp = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if stackDebug >= 1 {
			print("runtime: preempt0, newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
				"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " bp:", hex(morebuf.bp), " lr:", hex(morebuf.lr), " stackguard=", hex(morebuf.g.ptr().stackguard0), "}\n",
				"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " bp:", hex(gp.sched.bp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n\n")
		}
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
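		// (The CALL pushed a return address below the SP recorded in
		// gp.sched.sp, so the true low-water mark of the stack is one
		// word lower.)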
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " bp:", hex(morebuf.bp), " lr:", hex(morebuf.lr), " stackguard=", hex(morebuf.g.ptr().stackguard0), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " bp:", hex(gp.sched.bp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if stackDebug >= 1 {
			print("runtime: preempt1, newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
				"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " bp:", hex(morebuf.bp), " lr:", hex(morebuf.lr), " stackguard=", hex(morebuf.g.ptr().stackguard0), "}\n",
				"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " bp:", hex(gp.sched.bp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n\n")
		}
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}
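
// Worked example of the sizing policy above (illustrative numbers): a
// goroutine starts on a _FixedStack-sized stack (_StackMin = 4096 bytes
// plus any _StackSystem padding, rounded up to a power of 2). Each
// overflow doubles the allocation in newstack (4 KB -> 8 KB -> 16 KB ->
// ...), for amortized-constant copying cost, until maxstacksize would be
// exceeded, at which point the runtime throws "stack overflow". At GC
// time, shrinkstack halves the allocation, but only if less than a
// quarter of the stack is in use, and never below _FixedStack.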