github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
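	// For example, on linux/amd64 _StackSystem is 0, so _FixedStack0 is
	// 2048 and the shift-and-or cascade above leaves _FixedStack at 2048;
	// on windows/amd64 _StackSystem is 512*8 = 4096, so _FixedStack0 is
	// 6144 and _FixedStack rounds up to 8192.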

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
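	// Assuming the usual 32 KB _StackCacheSize, a refill brings this
	// order's per-P cache up to roughly 16 KB, and stackcacherelease
	// trims it back to about half again once stackfree sees it reach
	// _StackCacheSize.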
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
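	// On a platform like linux/amd64, where _FixedStack is 2 KB and
	// _NumStackOrders (defined in malloc.go) is 4, the fixed-size path
	// below serves 2 KB, 4 KB, 8 KB and 16 KB stacks (an 8 KB request
	// gets order 2); requests of _StackCacheSize (32 KB) or more take
	// the dedicated-span path.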
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
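//
// For example, if the old stack is [0x1000, 0x2000) and the new stack is
// [0x7000, 0x9000), delta is new.hi - old.hi = 0x7000, so an old-stack
// pointer such as 0x1800 becomes 0x8800 and keeps its offset from the top
// of the stack (copystack copies the used portion top-aligned).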
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) != 1 {
			continue
		}
		pp := (*uintptr)(add(scanp, i*sys.PtrSize))
	retry:
		p := *pp
		if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
			// Looks like a junk value in a pointer slot.
			// Live analysis wrong?
			getg().m.traceback = 2
			print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
			throw("invalid pointer found on stack")
		}
		if minp <= p && p < maxp {
			if stackDebug >= 3 {
				print("adjust ptr ", hex(p), " ", funcname(f), "\n")
			}
			if useCAS {
				ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
				if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
					goto retry
				}
			} else {
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine
		// that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
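	// Doubling means a goroutine that keeps overflowing grows its stack
	// 2 KB -> 4 KB -> 8 KB and so on (starting from _StackMin), so the
	// total bytes copied across all growths stay proportional to the
	// final stack size: the constant amortized cost noted above.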
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
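	// For example, with the default StackGuardMultiplier of 1 and no
	// _StackSystem, _StackLimit is 880-128 = 752 bytes, so a goroutine
	// with 1 KB of live frames on an 8 KB stack counts as using 1776
	// bytes, stays below the 2 KB quarter threshold, and is shrunk to
	// 4 KB.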
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}