github.com/c0deoo1/golang1.5@v0.0.0-20220525150107-c87c805d4593/src/runtime/stack1.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*ptrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
// Global stack cache holding free stacks of 2K/4K/8K/16K.
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex

// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		mSpanList_Init(&stackpool[i])
	}
	mSpanList_Init(&stackFreeQueue)
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.next
	if s == list {
		// no free stacks. Allocate another span worth.
		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		mSpanList_Insert(list, s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		mSpanList_Remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		mSpanList_Insert(&stackpool[order], s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		mSpanList_Remove(s)
		s.freelist = 0
		mHeap_FreeStack(&mheap_, s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
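	// For example, with _FixedStack = 2K and _NumStackOrders = 4 (the
	// usual 64-bit Linux configuration; both constants are platform
	// dependent, so the numbers are only an illustration), the cached
	// sizes and their free-list orders are:
	//
	//	2K  -> order 0
	//	4K  -> order 1
	//	8K  -> order 2
	//	16K -> order 3
	//
	// Anything larger takes the dedicated-span path in the else branch
	// below.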
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mHeap_FreeStack(&mheap_, s)
		} else {
			// Otherwise, add it to a list of stack spans
			// to be freed at the end of GC.
			//
			// TODO(austin): Make it possible to re-use
			// these spans as stacks, like we do for small
			// stack spans. (See issue #11466.)
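			// The queued spans sit on stackFreeQueue until
			// freeStackSpans (at the end of this file) drains
			// the list once GC finishes and it is safe to
			// return them to the heap.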
			lock(&stackpoolmu)
			mSpanList_Insert(&stackFreeQueue, s)
			unlock(&stackpoolmu)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*ptrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
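//
// Adjustframe is the per-frame callback that copystack passes to
// gentraceback (and, via adjustdefers, to tracebackdefers): for each
// live frame it looks up the compiler's locals and args stack maps and
// rewrites any pointer that still points into the old stack. As an
// illustration with made-up addresses: if the old stack is
// [0xc000, 0xc800) and adjinfo.delta is +0x1000, a slot holding 0xc420
// is rewritten to 0xd420, while values outside the old range are left
// alone (see adjustpointer and adjustpointers above).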
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch thechar {
	case '6', '8':
		minsize = 0
	case '7':
		minsize = spAlign
	default:
		minsize = ptrSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * ptrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if thechar == '6' && frame.argp-frame.varp == 2*regSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
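	// Each stack map is a bit vector with one bit per pointer-sized
	// word of the area being described: bit i set to 1 ("ptr" in
	// ptrnames) means word i holds a live pointer and gets adjusted,
	// 0 ("scalar") means it is skipped. For example, a three-word
	// argument area whose bitmap is 0b101 has pointers in words 0 and
	// 2 only (an illustrative value, not taken from a real frame).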
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
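	// Everything below uses the same delta = new.hi - old.hi computed
	// above: stacks grow down, only the top `used` bytes are live, and
	// the memmove below places them at the same distance from new.hi
	// that they had from old.hi, so adding delta to an old-stack
	// address yields its new location.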
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' {
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
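	// Growth is always by doubling (newsize = oldsize * 2), so a stack
	// that reaches n bytes has copied roughly fewer than n bytes in
	// total over all of its growths, which is the constant amortized
	// cost noted in the comment on newstack. maxstacksize starts out at
	// 1<<20 above and is raised to the real limit by runtime.main.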
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.next; s != list; {
			next := s.next
			if s.ref == 0 {
				mSpanList_Remove(s)
				s.freelist = 0
				mHeap_FreeStack(&mheap_, s)
			}
			s = next
		}
	}

	// Free queued stack spans.
	for stackFreeQueue.next != &stackFreeQueue {
		s := stackFreeQueue.next
		mSpanList_Remove(s)
		mHeap_FreeStack(&mheap_, s)
	}

	unlock(&stackpoolmu)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}
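
// How these primitives fit together when the runtime hands a goroutine
// its initial stack: the real call site is malg in proc1.go; the sketch
// below only illustrates the pattern and uses the hypothetical name
// newStackForG, which is not part of the runtime.
//
// stackalloc needs a power-of-2 size and must run on the scheduler
// stack (g0), so the caller rounds with round2 and wraps the call in
// systemstack; the stack guard is then planted _StackGuard bytes above
// the low end, exactly as newstack and copystack do above.
func newStackForG(gp *g, size int32) {
	size = round2(_StackSystem + size) // stackalloc requires a power-of-2 size
	systemstack(func() {
		// Run on g0 so the allocation itself can never trigger
		// a stack growth (see the check at the top of stackalloc).
		gp.stack, gp.stkbar = stackalloc(uint32(size))
	})
	gp.stackAlloc = uintptr(size)
	gp.stackguard0 = gp.stack.lo + _StackGuard
}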