github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// Initial stack size of a g.
	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

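	// A worked example of the rounding above, assuming linux/amd64 where
	// _StackSystem is 0: _FixedStack0 = 2048, _FixedStack1 = 2047 = 0x7ff,
	// and each OR step smears the set bits downward, so _FixedStack6 is
	// still 0x7ff and _FixedStack = 0x800 = 2048. With a nonzero
	// _StackSystem (e.g. 4096 on windows/amd64), _FixedStack1 = 6143,
	// the smearing yields 8191, and _FixedStack = 8192.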

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//	order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
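		// Remove the span from the free list; stackpoolfree puts it
		// back when one of its stacks is returned.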
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
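	//
	// A typical caller is malg in proc.go, which (roughly) does
	//
	//	stacksize = round2(_StackSystem + stacksize)
	//	systemstack(func() {
	//		newg.stack = stackalloc(uint32(stacksize))
	//	})
	//
	// so the allocation runs on g0 and the requested size is a power of 2.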
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
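//
// As a worked example of the order computation below: freeing an 8 KiB
// stack walks n2 from 8192 to 4096 to 2048, so order is 2 and the stack
// goes back to stackpool[2] or the per-P cache for order 2 (assuming the
// usual _FixedStack of 2048).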
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
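//
// For illustration (made-up addresses): with old = [0xc000040000,
// 0xc000042000) and delta = 0x42000, a slot holding 0xc000041f00 is
// rewritten to 0xc000083f00, while values outside the old range are
// left untouched.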
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) != 1 {
			continue
		}
		pp := (*uintptr)(add(scanp, i*sys.PtrSize))
	retry:
		p := *pp
		if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
			// Looks like a junk value in a pointer slot.
			// Live analysis wrong?
			getg().m.traceback = 2
			print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
			throw("invalid pointer found on stack")
		}
		if minp <= p && p < maxp {
			if stackDebug >= 3 {
				print("adjust ptr ", hex(p), " ", funcname(f), "\n")
			}
			if useCAS {
				ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
				if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
					goto retry
				}
			} else {
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
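	// In the frame layout above, frame.varp sits just above the locals,
	// so varp-sp covers the locals plus the outgoing argument area.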
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
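	// tracebackdefers walks the deferred calls as if they were stack
	// frames, so adjustframe fixes up their argument areas too.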
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
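//
// For illustration (made-up addresses): growing from old =
// [0xc000040000, 0xc000042000) (8 KiB) to new = [0xc000080000,
// 0xc000084000) (16 KiB) gives delta = new.hi - old.hi = 0x42000.
// The used portion [old.hi-used, old.hi) is copied to
// [new.hi-used, new.hi), so every frame keeps its offset from the
// top of the stack, and pointers into the old range are rewritten
// by adding delta.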
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
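
	// newstack runs on this M's g0 stack: the assembly morestack stub
	// saved the user goroutine's state in m.morebuf and gp.sched, then
	// switched to g0 before calling here. We either honor a pending
	// preemption request or double the stack with copystack and gogo
	// back to the user goroutine.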
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
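		// Back up SP by one word so the overflow check below accounts
		// for the return address pushed by that call.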
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
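			// (gfget allocates a fresh stack before reusing a G whose
			// stack.lo is 0.)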
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}