github.com/jonasi/go@v0.0.0-20150930005915-e78e654c1de0/src/runtime/stack1.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*ptrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex

// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		mSpanList_Init(&stackpool[i])
	}
	mSpanList_Init(&stackFreeQueue)
}
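
// An illustrative sketch of the size-to-order mapping: assuming
// _FixedStack == 2048 and _NumStackOrders == 4, as on a typical 64-bit
// Linux system, the pooled sizes are 2KB, 4KB, 8KB, and 16KB. The order
// for a pooled size n is found by repeated halving, the same loop that
// stackalloc and stackfree use below:
//
//	order := uint8(0)
//	for n2 := n; n2 > _FixedStack; n2 >>= 1 {
//		order++
//	}
//	// e.g. n == 8192 gives order 2, so the stack comes from stackpool[2].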

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.next
	if s == list {
		// no free stacks. Allocate another span worth.
		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		mSpanList_Insert(list, s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		mSpanList_Remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		mSpanList_Insert(&stackpool[order], s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		mSpanList_Remove(s)
		s.freelist = 0
		mHeap_FreeStack(&mheap_, s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
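
// A sketch of how stackalloc splits one allocation between the stack
// and its stack barrier array (sizes not to scale): for a request of n
// bytes with room for maxstkbar barriers, the top nstkbar bytes hold
// the stkbar backing array and the rest is usable stack, which is why
// stack.hi is v+top = v+n-nstkbar rather than v+n.
//
//	v                          v+top = stack.hi        v+n
//	+--------------------------+-----------------------+
//	|       usable stack       | stkbar backing array  |
//	+--------------------------+-----------------------+
//	^ stack.lo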

func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mHeap_FreeStack(&mheap_, s)
		} else {
			// Otherwise, add it to a list of stack spans
			// to be freed at the end of GC.
			//
			// TODO(austin): Make it possible to re-use
			// these spans as stacks, like we do for small
			// stack spans. (See issue #11466.)
			lock(&stackpoolmu)
			mSpanList_Insert(&stackFreeQueue, s)
			unlock(&stackpoolmu)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*ptrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}
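
// A worked example of the adjustment arithmetic (addresses made up for
// illustration): if the old stack is [0x0000c000, 0x0000d000) and the
// new stack is [0x0001c000, 0x0001d000), then adjinfo.delta =
// new.hi - old.hi = 0x10000. A slot holding 0x0000c8a0 points into the
// old stack, so it is rewritten to 0x0001c8a0; a slot holding an
// address outside [old.lo, old.hi), such as a heap pointer, is left
// untouched.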

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch thechar {
	case '6', '8': // amd64, 386
		minsize = 0
	case '7': // arm64
		minsize = spAlign
	default:
		minsize = ptrSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * ptrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if thechar == '6' && frame.argp-frame.varp == 2*regSize { // amd64 only
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}
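
// A sketch of why copystack anchors the copy at the top of the stack
// (values illustrative): stacks grow down, so the live data is the
// `used` bytes ending at old.hi. The memmove places those bytes so they
// end at new.hi instead, which is why the saved sp and every adjusted
// pointer move by the same amount:
//
//	delta  = new.hi - old.hi
//	new sp = old sp + delta = new.hi - used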

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
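
// Worked examples, easy to check by hand:
//
//	round2(1)    == 1
//	round2(3)    == 4
//	round2(1000) == 1024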

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' { // amd64, 386
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
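
// A sketch of the growth policy's cost, assuming a 2KB initial stack:
// each overflow doubles the allocation, 2KB -> 4KB -> 8KB -> ..., until
// the doubled size exceeds maxstacksize (which runtime.main raises from
// the placeholder above to roughly 1GB on 64-bit systems), at which
// point the goroutine dies with "stack overflow". Because the old sizes
// form a geometric series, the total bytes ever copied stay below the
// final stack size, which is what "constant amortized cost" in the
// newstack comment above refers to.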

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.next; s != list; {
			next := s.next
			if s.ref == 0 {
				mSpanList_Remove(s)
				s.freelist = 0
				mHeap_FreeStack(&mheap_, s)
			}
			s = next
		}
	}

	// Free queued stack spans.
	for stackFreeQueue.next != &stackFreeQueue {
		s := stackFreeQueue.next
		mSpanList_Remove(s)
		mHeap_FreeStack(&mheap_, s)
	}

	unlock(&stackpoolmu)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}