github.com/liujq9674git/golang-src-1.7@v0.0.0-20230517174348-17f6ec47f3f8/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
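		// The new span is _StackCacheSize bytes, so we ask for
		// _StackCacheSize >> _PageShift pages; the loop below carves
		// it into _FixedStack<<order sized stacks and threads them
		// onto the span's free list.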
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
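	// Each iteration moves one stack of this order from the global
	// pool onto the local list; size tracks the bytes cached so the
	// transfer stops at half of _StackCacheSize.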
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
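			// stackpoolalloc must be called with stackpoolmu held.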
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
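			// Cache the span by its page-count order. stackalloc
			// consults this cache before asking the heap for a new
			// large stack, and freeStackSpans returns these spans
			// to the heap once GC is done.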
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
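				// Raise the traceback level before crashing; a small
				// non-zero value in a pointer slot suggests the
				// liveness map is wrong.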
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
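	// If the frame carries a precomputed argument map (frame.argmap),
	// use it directly; otherwise look the bitmap up in the function's
	// FUNCDATA, just as for locals above.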
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
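	// With every channel involved locked, no concurrent send can
	// write into these slots while their pointers are rewritten.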
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
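	// gentraceback walks each frame of the (already copied) stack
	// and applies adjustframe to it.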
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
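			// Restore the real stack guard so the split check passes,
			// and resume the goroutine where it stopped.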
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
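// If fv is nil, the goroutine is pointed at nilfunc, which faults
// deliberately if it is ever actually called.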
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}