github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
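
// The sketch below (not part of the runtime) restates, in plain Go, the
// check that the assembly sequences above implement. The names sp,
// stackguard, StackSmall, and StackBig stand in for the real registers and
// constants; it only shows roughly when morestack is reached.
//
//	func needsMoreStack(sp, stackguard, framesize uintptr) bool {
//		switch {
//		case framesize <= StackSmall:
//			// Tiny frames may dip up to StackSmall bytes below the
//			// guard, so comparing SP against the guard suffices.
//			return sp <= stackguard
//		case framesize < StackBig:
//			// Medium frames compare SP-(framesize-StackSmall)
//			// against the guard.
//			return sp-framesize+StackSmall <= stackguard
//		default:
//			// Huge frames always call morestack, which redoes the
//			// check in a way that tolerates SP-framesize wrapping.
//			return true
//		}
//	}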

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for one _StackSmall frame plus
	// a _StackLimit chain of NOSPLIT calls plus _StackSystem
	// bytes for the OS.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
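
// A worked example of the constants above (not part of the runtime),
// assuming a typical linux/amd64 build where _StackSystem = 0 and
// sys.StackGuardMultiplier = 1:
//
//	_FixedStack0 = 2048 + 0 = 2048
//	_FixedStack1 = 2047            // 0b0111_1111_1111
//	_FixedStack6 = 2047            // OR-ing in shifted copies smears the top
//	                               // bit into every lower bit
//	_FixedStack  = 2047 + 1 = 2048 // already a power of 2, so unchanged
//
//	_StackGuard = 928*1 + 0 = 928
//	_StackLimit = 928 - 0 - 128 = 800 // budget for a chain of NOSPLIT calls
//
// On Windows/amd64, _StackSystem = 512*8 = 4096, so _FixedStack0 = 6144 and
// the same smearing rounds it up to _FixedStack = 8192. stacklog2 is the
// matching helper in the other direction, e.g. stacklog2(8192/2048) = 2.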

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
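
// The manualFreeList bookkeeping above is an intrusive LIFO free list: the
// first word of each free slot doubles as the "next" link, so the list needs
// no extra storage. A standalone sketch of the same push/pop pattern, not
// part of the runtime and using a hypothetical node type instead of
// gclinkptr/mspan:
//
//	type freeSlot struct{ next *freeSlot }
//
//	func push(head **freeSlot, s *freeSlot) {
//		s.next = *head
//		*head = s
//	}
//
//	func pop(head **freeSlot) *freeSlot {
//		s := *head
//		if s != nil {
//			*head = s.next
//		}
//		return s
//	}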

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
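
// Refill and release both stop at _StackCacheSize/2, so a P's cache hovers
// around the half-full mark instead of bouncing between empty and full,
// which would hammer the global pool lock. A rough sketch of the same
// watermark idea, not part of the runtime and using made-up names:
//
//	const capacity = 32 << 10 // assumed cache capacity in bytes
//
//	func onEmpty(cacheSize *int, grab func() int) {
//		for *cacheSize < capacity/2 { // refill up to half full
//			*cacheSize += grab()
//		}
//	}
//
//	func onFull(cacheSize *int, give func() int) {
//		for *cacheSize > capacity/2 { // release down to half full
//			*cacheSize -= give()
//		}
//	}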

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
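
// A worked example of the size-class selection in stackalloc, assuming the
// usual linux/amd64 values _FixedStack = 2048, _NumStackOrders = 4, and
// _StackCacheSize = 32768 (defined elsewhere in the runtime); n is always a
// power of two:
//
//	n = 2048  -> order 0 (pool)
//	n = 4096  -> order 1 (pool)
//	n = 8192  -> order 2 (pool)
//	n = 16384 -> order 3 (pool)
//	n = 32768 -> n == _FixedStack<<_NumStackOrders, so it takes the
//	             large-stack path: npage = 4, log2npage = 2.
//
// The order loop simply counts how many times n must be halved to reach
// _FixedStack, i.e. order = log2(n/_FixedStack).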

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
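
// A small worked example of the adjustment rule in adjustpointer (made-up
// addresses, not from a real run): suppose the old stack is
// [0xc000040000, 0xc000042000) and the new stack is
// [0xc000080000, 0xc000084000). Then
//
//	delta = new.hi - old.hi = 0xc000084000 - 0xc000042000 = 0x42000
//
// and a saved pointer p = 0xc000041f80 (inside the old stack) is rewritten
// to p + delta = 0xc000083f80, which sits at the same distance below the
// top of the new stack. Pointers outside [old.lo, old.hi) are left alone.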

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
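
// adjustpointers walks the pointer bitmap one byte (eight words) at a time
// and visits only the set bits, using the classic pair of tricks
// TrailingZeros (find the lowest set bit) and b &= b-1 (clear it). A
// standalone sketch of the same loop, not tied to runtime types:
//
//	import "math/bits"
//
//	// visitSetBits calls visit(i) for every set bit i in bitmap,
//	// where bit i of byte k corresponds to index k*8 + i.
//	func visitSetBits(bitmap []byte, visit func(int)) {
//		for k, b := range bitmap {
//			for b != 0 {
//				j := bits.TrailingZeros8(b)
//				b &= b - 1 // clear the lowest set bit
//				visit(k*8 + j)
//			}
//		}
//	}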

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := frame.getStackMap(&adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other locks other than hchan.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
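
// A worked example of copystack's arithmetic, reusing the made-up addresses
// from the adjustpointer example above: the old stack is
// [0xc000040000, 0xc000042000) (8 KiB), gp.sched.sp is 0xc000041c00, and we
// grow to newsize = 16 KiB with the new stack at
// [0xc000080000, 0xc000084000). Then
//
//	used  = old.hi - sp     = 0x400 (1 KiB actually in use)
//	delta = new.hi - old.hi = 0x42000
//
// and, with no sudog synchronization needed, memmove copies the used 1 KiB
// from [old.hi-used, old.hi) to [new.hi-used, new.hi), after which
// gp.sched.sp becomes new.hi - used = 0xc000083c00. Only the in-use high
// end of the stack is copied; the unused low portion is never touched.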

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}
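
	// Worked example of the sizing above (made-up numbers): an 8 KiB stack
	// doubles to newsize = 16 KiB. If the faulting function has
	// funcMaxSPDelta = 20 KiB and the goroutine already uses 1 KiB, then
	// needed = 20 KiB + _StackGuard, and newsize-used = 15 KiB is still too
	// small, so the loop doubles again to 32 KiB, which satisfies
	// 32 KiB - 1 KiB >= needed. Growth is always by powers of two, so
	// repeated growth has constant amortized cost.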

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
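
// A worked example of the shrink heuristic above (made-up numbers, assuming
// _StackLimit = 800): a goroutine with a 32 KiB stack whose SP is 2048 bytes
// below stack.hi has used = 2048 + 800 = 2848 bytes, which is less than
// avail/4 = 8192, so the stack is copied down to newsize = 16 KiB. Had it
// been using 9 KiB, used >= avail/4 would hold and the shrink would be
// skipped.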

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if GC prog is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

// gcdata returns pointer map or GC prog of the type.
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}
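
// A small example of how the stackObjectRecord fields above are interpreted
// (made-up values, not emitted by any real compile): a record with off = -32
// and _ptrdata = 16 describes an object starting 32 bytes below frame.varp
// whose first 16 bytes may contain pointers, with gcdata pointing at a plain
// 1-bit-per-word pointer bitmap. If instead _ptrdata were -16, useGCProg()
// would report true, ptrdata() would still return 16, and gcdata would be a
// GC program that must be materialized before use. Records with off >= 0 are
// measured from frame.argp (arguments and results) instead.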

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = _FixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// For details, see the design doc at
	// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
	// The basic algorithm is to track the average size of stacks
	// and start goroutines with stack equal to that average size.
	// Starting at the average size uses at most 2x the space that
	// an ideal algorithm would have used.
	// This is just a heuristic to avoid excessive stack growth work
	// early in a goroutine's lifetime. See issue 18138. Stacks that
	// are allocated too small can still grow, and stacks allocated
	// too large can still shrink.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = _FixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + _StackGuard
	// Note: we add _StackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < _FixedStack {
		avg = _FixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}
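
// A worked example of gcComputeStartingStackSize (made-up numbers, assuming
// _StackGuard = 928 and _FixedStack = 2048): if the last GC scanned 1000
// goroutine stacks totalling 5,000,000 bytes, the average is 5000 bytes, so
// avg = 5000 + 928 = 5928, which is within [_FixedStack, maxstacksize], and
// startingStackSize = round2(5928) = 8192. New goroutines then start with an
// 8 KiB stack instead of growing up from 2 KiB one doubling at a time.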