github.com/miolini/go@v0.0.0-20160405192216-fca68c8cb408/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
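	// For illustration: the _FixedStack1.._FixedStack6 chain above is the
	// standard "smear the high bit right, then add one" trick for rounding
	// up to a power of two. Written as a plain function it would look
	// roughly like this (a sketch, not something the runtime defines):
	//
	//	func roundUpPow2(x uintptr) uintptr {
	//		x--
	//		x |= x >> 1
	//		x |= x >> 2
	//		x |= x >> 4
	//		x |= x >> 8
	//		x |= x >> 16
	//		return x + 1
	//	}
	//
	// With _StackSystem == 0, _FixedStack0 is 2048, already a power of two,
	// so _FixedStack stays 2048; a hypothetical value of 2560 would round
	// up to 4096.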
	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//	order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
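		// (Each span obtained here is _StackCacheSize bytes and is carved
		// below into _StackCacheSize/(_FixedStack<<order) fixed-size stacks
		// linked through s.freelist. Assuming the usual 32 KB cache size
		// and 2 KB _FixedStack, an order-0 span yields 16 free stacks.)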
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		list.insert(s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.freelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
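	// (Filling to exactly half, while stackcacherelease below drains back
	// down to half, gives some hysteresis: a thread has to allocate or free
	// roughly _StackCacheSize/2 bytes of stacks in a row before it touches
	// the stackpoolmu-protected global pool again.)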
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
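		// (Stack sizes are powers of two, so npage is a power of two and
		// each stackLarge.free bucket effectively holds spans of a single
		// size. Assuming 8 KB pages, a 64 KB stack maps to npage == 8 and
		// bucket log2npage == 3.)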
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
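			// (Spans parked here are handed back to the heap by
			// freeStackSpans, below, once this GC cycle finishes.)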
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
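				// (A value below _PageSize cannot be a real pointer when
				// invalidptr checking is enabled, and poisonStack is a
				// recognizable fill pattern; either way this slot never held
				// a valid pointer. m.traceback is raised so the crash below
				// prints as much detail as possible.)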
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.TheChar {
	case '7':
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
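	// (Unlike the locals above, the args bitmap may come from frame.argmap,
	// and adjustpointers is called with a nil *_func, which skips the
	// small-value junk-pointer check; only the poisonStack pattern is still
	// treated as an error there.)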
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		lock(&sg.c.lock)
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		unlock(&sg.c.lock)
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
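	// (gentraceback walks every frame of the new stack, calling adjustframe
	// on each one so that locals, saved base pointers, and argument slots
	// that still hold old-stack addresses get rewritten by adjinfo.delta.)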
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
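			// (Restoring the real guard clears the synthetic stackPreempt
			// value that forced this prologue check to fail; gp.preempt
			// itself stays set, so a later safe point will retry.)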
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.TheChar == '6' || sys.TheChar == '8' {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.ref == 0 {
				list.remove(s)
				s.freelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}