github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
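
	// Worked example of the rounding above, shown here purely for
	// illustration (the values follow directly from the constants):
	// on linux/amd64 _StackSystem is 0, so _FixedStack0 = 2048 is
	// already a power of two and _FixedStack = 2048. On windows/amd64
	// _StackSystem is 512*8 = 4096, so _FixedStack0 = 6144 and the
	// bit-smearing steps round it up to _FixedStack = 8192.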

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
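
// For example, stacklog2(1) = 0, stacklog2(4) = 2, and stacklog2(7) = 2:
// the loop shifts n right until it reaches 1, so the result is rounded
// down, matching the ⌊log_2(n)⌋ in the doc comment.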

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}
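
// Sizing note for the per-P stack caches that follow: stackcacherefill
// fills a cache up to _StackCacheSize/2, and stackfree calls
// stackcacherelease to trim it back down to _StackCacheSize/2 once it
// grows past _StackCacheSize. This hysteresis keeps steady-state stack
// allocation and freeing mostly off the global stackpoolmu lock.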

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
	var stkbarSlice slice

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		if maxstkbar != 0 {
			stkbarSlice = slice{add(v, top), 0, maxstkbar}
		}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	if maxstkbar != 0 {
		stkbarSlice = slice{add(v, top), 0, maxstkbar}
	}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine
		// that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// ctxt is the value of the context register on morestack. newstack
// will write it to g.sched.ctxt.
func newstack(ctxt unsafe.Pointer) {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg
	// Write ctxt to gp.sched. We do this here instead of in
	// morestack so it has the necessary write barrier.
	gp.sched.ctxt = ctxt

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}