// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	// Each _FixedStackN step propagates the high bit one group
	// further right, so _FixedStack6 has all bits below the top
	// set; adding 1 yields the next power of two.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	// uintptrMask has every bit of a uintptr set; the sentinels
	// below AND with it so the negative constants wrap to values
	// near the top of the address space.
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

// stackinit initializes the global small-stack pool (one free list
// per size order) and the large-stack span pool.
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		// Carve the span into fixed-size stacks and thread them
		// onto the span's free list.
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

// stackcacherelease returns stacks from c's per-P cache to the global
// pool until the cache is back down to half capacity.
//
//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

// stackcache_clear returns every cached stack in c, at every order,
// to the global pool.
//
//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Small stack: return it to the per-P cache or, if that is
		// unavailable (same conditions as in stackalloc), to the
		// global pool.
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

// ptrnames labels the two possible states of a stack-map bit, used
// only by debug printing in adjustpointers.
var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

// adjustinfo carries the parameters of one stack copy: the old stack
// bounds and the delta to add to any pointer that falls inside them.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// gobitvector is the Go-slice form of bitvector, produced by gobv.
type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

// gobv converts a compiler bitvector into a gobitvector whose
// bytedata slice covers exactly ⌈n/8⌉ bytes.
func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

// ptrbit returns bit i of bv (1 means the corresponding word is a pointer).
func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// adjustframe is the per-frame callback used during stack copying: it
// rewrites the pointers in one frame's locals, saved base pointer,
// and arguments, using the compiler's stack maps.
//
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does an systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

// adjustctxt adjusts the stack pointers saved in gp's scheduling
// context: the closure context and (if enabled) the frame pointer.
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

// adjustdefers adjusts all pointers into the old stack that are
// reachable from gp's defer chain.
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

// fillstack writes byte b over the whole extent of stk. Used only by
// the stackPoisonCopy debug mode to poison freed or fresh stacks.
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// findsghi returns the highest address within stk that is covered by
// one of gp's sudog elem slots (elem + elemsize), or 0 if none fall
// inside stk.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// ctxt is the value of the context register on morestack. newstack
// will write it to g.sched.ctxt.
func newstack(ctxt unsafe.Pointer) {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	// Write ctxt to gp.sched. We do this here instead of in
	// morestack so it has the necessary write barrier.
	gp.sched.ctxt = ctxt

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

// nilfunc deliberately faults if it is ever called; it is the target
// installed by gostartcallfn when fv is nil.
//
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
1107 return 1108 } 1109 1110 oldsize := gp.stack.hi - gp.stack.lo 1111 newsize := oldsize / 2 1112 // Don't shrink the allocation below the minimum-sized stack 1113 // allocation. 1114 if newsize < _FixedStack { 1115 return 1116 } 1117 // Compute how much of the stack is currently in use and only 1118 // shrink the stack if gp is using less than a quarter of its 1119 // current stack. The currently used stack includes everything 1120 // down to the SP plus the stack guard space that ensures 1121 // there's room for nosplit functions. 1122 avail := gp.stack.hi - gp.stack.lo 1123 if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 { 1124 return 1125 } 1126 1127 // We can't copy the stack if we're in a syscall. 1128 // The syscall might have pointers into the stack. 1129 if gp.syscallsp != 0 { 1130 return 1131 } 1132 if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 { 1133 return 1134 } 1135 1136 if stackDebug > 0 { 1137 print("shrinking stack ", oldsize, "->", newsize, "\n") 1138 } 1139 1140 copystack(gp, newsize, false) 1141 } 1142 1143 // freeStackSpans frees unused stack spans at the end of GC. 1144 func freeStackSpans() { 1145 lock(&stackpoolmu) 1146 1147 // Scan stack pools for empty stack spans. 1148 for order := range stackpool { 1149 list := &stackpool[order] 1150 for s := list.first; s != nil; { 1151 next := s.next 1152 if s.allocCount == 0 { 1153 list.remove(s) 1154 s.manualFreeList = 0 1155 mheap_.freeManual(s, &memstats.stacks_inuse) 1156 } 1157 s = next 1158 } 1159 } 1160 1161 unlock(&stackpoolmu) 1162 1163 // Free large stack spans. 
1164 lock(&stackLarge.lock) 1165 for i := range stackLarge.free { 1166 for s := stackLarge.free[i].first; s != nil; { 1167 next := s.next 1168 stackLarge.free[i].remove(s) 1169 mheap_.freeManual(s, &memstats.stacks_inuse) 1170 s = next 1171 } 1172 } 1173 unlock(&stackLarge.lock) 1174 } 1175 1176 //go:nosplit 1177 func morestackc() { 1178 systemstack(func() { 1179 throw("attempt to execute system stack code on user stack") 1180 }) 1181 }