github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFlushCaches
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024 // 64MB worth of spans
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The caller must have called gcCopySpans().
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	if !work.markrootDone {
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			nDataRoots := nBlocks(datap.edata - datap.data)
			if nDataRoots > work.nDataRoots {
				work.nDataRoots = nDataRoots
			}
		}

		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			nBSSRoots := nBlocks(datap.ebss - datap.bss)
			if nBSSRoots > work.nBSSRoots {
				work.nBSSRoots = nBSSRoots
			}
		}
	}

	if !work.markrootDone {
		// On the first markroot, we need to scan span roots.
		// In concurrent GC, this happens during concurrent
		// mark and we depend on addfinalizer to ensure the
		// above invariants for objects that get finalizers
		// after concurrent mark. In STW GC, this will happen
		// during mark termination.
		work.nSpanRoots = (len(work.spans) + rootBlockSpans - 1) / rootBlockSpans

		// On the first markroot, we need to scan all Gs. Gs
		// may be created after this point, but it's okay that
		// we ignore them because they begin life without any
		// roots, so there's nothing to scan, and any roots
		// they create during the concurrent phase will be
		// scanned during mark termination. During mark
		// termination, allglen isn't changing, so we'll scan
		// all Gs.
		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
		work.nRescanRoots = 0
	} else {
		// We've already scanned span roots and kept the scan
		// up-to-date during concurrent mark.
		work.nSpanRoots = 0

		// On the second pass of markroot, we're just scanning
		// dirty stacks. It's safe to access rescan since the
		// world is stopped.
		work.nStackRoots = 0
		work.nRescanRoots = len(work.rescan.list)
	}

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
}
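
// Worked example of the accounting above (informal, sizes hypothetical):
// with two modules whose data segments are 300KB and 700KB, nBlocks
// yields 2 and 3 blocks respectively, so work.nDataRoots = 3. markroot
// then scans block i of *every* module, which is why gcMarkRootPrepare
// takes the per-module maximum rather than the sum; markrootBlock simply
// returns early for modules that have no block i.
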
// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	if gcphase == _GCmarktermination {
		for i := 0; i < len(allgs); i++ {
			gp := allgs[i]
			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
				println("gp", gp, "goid", gp.goid,
					"status", readgstatus(gp),
					"gcscandone", gp.gcscandone,
					"gcscanvalid", gp.gcscanvalid)
				throw("scan missed a g")
			}
		}
	} else {
		for i := 0; i < work.nStackRoots; i++ {
			gp := allgs[i]
			if !gp.gcscandone {
				throw("scan missed a g")
			}
		}
	}
	unlock(&allglock)
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseData := uint32(fixedRootCount)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	baseRescan := baseStacks + uint32(work.nStackRoots)
	end := baseRescan + uint32(work.nRescanRoots)
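
	// The index space this carves out looks like (informally):
	//
	//	[0, baseData)            fixed roots (finalizers, mcache flush, dead G stacks)
	//	[baseData, baseBSS)      data segment blocks
	//	[baseBSS, baseSpans)     BSS segment blocks
	//	[baseSpans, baseStacks)  span root shards
	//	[baseStacks, baseRescan) goroutine stacks
	//	[baseRescan, end)        rescan list entries
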
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseData <= i && i < baseBSS:
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
		}

	case i == fixedRootFlushCaches:
		if gcphase == _GCmarktermination { // Do not flush mcaches during concurrent phase.
			flushallmcaches()
		}

	case i == fixedRootFreeGStacks:
		// Only do this once per GC cycle; preferably
		// concurrently.
		if !work.markrootDone {
			// Switch to the system stack so we can call
			// stackfree.
			systemstack(markrootFreeGStacks)
		}

	case baseSpans <= i && i < baseStacks:
		// mark MSpan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < baseRescan {
			gp = allgs[i-baseStacks]
		} else if baseRescan <= i && i < end {
			gp = work.rescan.list[i-baseRescan].ptr()
		} else {
			throw("markroot: bad index")
		}

		// Remember when we first observed the G blocked. This
		// is needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		if gcphase != _GCmarktermination && gp.startpc == gcBgMarkWorkerPC && readgstatus(gp) != _Gdead {
			// GC background workers may be
			// non-preemptible, so we may deadlock if we
			// try to scan them during a concurrent phase.
			// They also have tiny stacks, so just ignore
			// them until mark termination.
			gp.gcscandone = true
			queueRescan(gp)
			break
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is mark
			// termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = "garbage collection scan"
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw)
}
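
// Shard arithmetic above, informally: with rootBlockBytes = 256<<10 and
// 8-byte pointers, each shard covers 32K words, and its slice of the
// pointer mask is 32K bits = 4KB, which is exactly the
// rootBlockBytes/(8*sys.PtrSize) byte step applied to ptrmask0.
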
// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
//
//TODO go:nowritebarrier
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gflock)
	list := sched.gfreeStack
	sched.gfreeStack = nil
	unlock(&sched.gflock)
	if list == nil {
		return
	}

	// Free stacks.
	tail := list
	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		tail = gp
	}

	// Put Gs back on the free list.
	lock(&sched.gflock)
	tail.schedlink.set(sched.gfreeNoStack)
	sched.gfreeNoStack = list
	unlock(&sched.gflock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	if work.markrootDone {
		throw("markrootSpans during second markroot")
	}

	sg := mheap_.sweepgen
	startSpan := shard * rootBlockSpans
	endSpan := (shard + 1) * rootBlockSpans
	if endSpan > len(work.spans) {
		endSpan = len(work.spans)
	}
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range work.spans[startSpan:endSpan] {
		if s.state != mSpanInUse {
			continue
		}
		if !useCheckmark && s.sweepgen != sg {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
		}

		unlock(&s.speciallock)
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
//
//go:nowritebarrier
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	// Compute the amount of scan work we need to do to make the
	// balance positive. We over-assist to build up credit for
	// future allocations and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes + gcOverAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
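
	// Numerically (informal, values hypothetical): if the G has
	// gcAssistBytes = -64KB and gcOverAssistBytes adds 64KB of
	// headroom, debtBytes is 128KB; with assistWorkPerByte = 0.5 the
	// goroutine owes 64K units of scan work before it may allocate
	// freely again.
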
retry:
	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			return
		}
	}

	// Perform assist work
	completed := false
	systemstack(func() {
		if atomic.Load(&gcBlackenEnabled) == 0 {
			// The gcBlackenEnabled check in malloc races with the
			// store that clears it but an atomic check in every malloc
			// would be a performance hit.
			// Instead we recheck it here on the non-preemptible system
			// stack to determine if we should perform an assist.

			// GC is done, so ignore any remaining debt.
			gp.gcAssistBytes = 0
			return
		}
		// Track time spent in this assist. Since we're on the
		// system stack, this is non-preemptible, so we can
		// just measure start and end time.
		startTime := nanotime()

		decnwait := atomic.Xadd(&work.nwait, -1)
		if decnwait == work.nproc {
			println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
			throw("nwait > work.nprocs")
		}

		// drain own cached work first in the hopes that it
		// will be more cache friendly.
		gcw := &getg().m.p.ptr().gcw
		workDone := gcDrainN(gcw, scanWork)
		// If we are near the end of the mark phase,
		// dispose of the gcw.
		if gcBlackenPromptly {
			gcw.dispose()
		}

		// Record that we did this much scan work.
		//
		// Back out the number of bytes of assist credit that
		// this scan work counts for. The "1+" is a poor man's
		// round-up, to ensure this adds credit even if
		// assistBytesPerWork is very low.
		gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

		// If this is the last worker and we ran out of work,
		// signal a completion point.
		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait > work.nproc {
			println("runtime: work.nwait=", incnwait,
				"work.nproc=", work.nproc,
				"gcBlackenPromptly=", gcBlackenPromptly)
			throw("work.nwait > work.nproc")
		}

		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// This has reached a background completion
			// point.
			completed = true
		}
		duration := nanotime() - startTime
		_p_ := gp.m.p.ptr()
		_p_.gcAssistTime += duration
		if _p_.gcAssistTime > gcAssistTimeSlack {
			atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
			_p_.gcAssistTime = 0
		}
	})
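
	// The "1+" round-up above matters when assistBytesPerWork < 1:
	// e.g. with assistBytesPerWork = 0.25 (hypothetical) and
	// workDone = 2, the float-to-int conversion alone would credit 0
	// bytes and the assist could fail to make progress; the +1
	// guarantees the debt shrinks on every pass.
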
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		lock(&work.assistQueue.lock)

		// If the GC cycle is over, just return. This is the
		// likely path if we completed above. We do this
		// under the lock to prevent a GC cycle from ending
		// between this check and queuing the assist.
		if atomic.Load(&gcBlackenEnabled) == 0 {
			unlock(&work.assistQueue.lock)
			return
		}

		oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
		if oldHead == 0 {
			work.assistQueue.head.set(gp)
		} else {
			oldTail.ptr().schedlink.set(gp)
		}
		work.assistQueue.tail.set(gp)
		gp.schedlink.set(nil)
		// Recheck for background credit now that this G is in
		// the queue, but can still back out. This avoids a
		// race in case background marking has flushed more
		// credit since we checked above.
		if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
			work.assistQueue.head = oldHead
			work.assistQueue.tail = oldTail
			if oldTail != 0 {
				oldTail.ptr().schedlink.set(nil)
			}
			unlock(&work.assistQueue.lock)
			goto retry
		}
		// Park for real.
		goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlock, 2)

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
}
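
// A parked assist is resumed on one of two paths (both below): either
// gcFlushBgCredit ready()s it once background credit covers its debt,
// or gcWakeAllAssists injects the whole queue when the mark phase ends.
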
// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	injectglist(work.assistQueue.head.ptr())
	work.assistQueue.head.set(nil)
	work.assistQueue.tail.set(nil)
	unlock(&work.assistQueue.lock)
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.head == 0 {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	gp := work.assistQueue.head.ptr()
	for gp != nil && scanBytes > 0 {
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			xgp := gp
			gp = gp.schedlink.ptr()
			// It's important that we *not* put xgp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(xgp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			xgp := gp
			gp = gp.schedlink.ptr()
			if gp == nil {
				// gp is the only assist in the queue.
				gp = xgp
			} else {
				xgp.schedlink = 0
				work.assistQueue.tail.ptr().schedlink.set(xgp)
				work.assistQueue.tail.set(xgp)
			}
			break
		}
	}
	work.assistQueue.head.set(gp)
	if gp == nil {
		work.assistQueue.tail.set(nil)
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}
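
// A sketch of the unit conversion above: assistWorkPerByte and
// assistBytesPerWork are maintained by the pacer as (effectively)
// reciprocals, so leftover byte credit converts back into scan-work
// units for bgScanCredit with only float truncation loss.
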
// scanstack scans gp's stack, greying all pointers found on the stack.
//
// During mark phase, it also installs stack barriers while traversing
// gp's stack. During mark termination, it stops scanning when it
// reaches an unhit stack barrier.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Shrink the stack if not much of it is being used. During
	// concurrent GC, we can do this during concurrent mark.
	if !work.markrootDone {
		shrinkstack(gp)
	}

	// Prepare for stack barrier insertion/removal.
	var sp, barrierOffset, nextBarrier uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
	} else {
		sp = gp.sched.sp
	}
	gcLockStackBarriers(gp) // Not necessary during mark term, but harmless.
	switch gcphase {
	case _GCmark:
		// Install stack barriers during stack scan.
		barrierOffset = uintptr(firstStackBarrierOffset)
		nextBarrier = sp + barrierOffset

		if debug.gcstackbarrieroff > 0 {
			nextBarrier = ^uintptr(0)
		}

		// Remove any existing stack barriers before we
		// install new ones.
		gcRemoveStackBarriers(gp)

	case _GCmarktermination:
		if !work.markrootDone {
			// This is a STW GC. There may be stale stack
			// barriers from an earlier cycle since we
			// never passed through mark phase.
			gcRemoveStackBarriers(gp)
		}

		if int(gp.stkbarPos) == len(gp.stkbar) {
			// gp hit all of the stack barriers (or there
			// were none). Re-scan the whole stack.
			nextBarrier = ^uintptr(0)
		} else {
			// Only re-scan up to the lowest un-hit
			// barrier. Any frames above this have not
			// executed since the concurrent scan of gp and
			// any writes through up-pointers to above
			// this barrier had write barriers.
			nextBarrier = gp.stkbar[gp.stkbarPos].savedLRPtr
			if debugStackBarrier {
				print("rescan below ", hex(nextBarrier), " in [", hex(sp), ",", hex(gp.stack.hi), ") goid=", gp.goid, "\n")
			}
		}

	default:
		throw("scanstack in wrong phase")
	}

	// Scan the stack.
	var cache pcvalueCache
	n := 0
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &cache, gcw)

		if frame.fp > nextBarrier {
			// We skip installing a barrier on bottom-most
			// frame because on LR machines this LR is not
			// on the stack.
			if gcphase == _GCmark && n != 0 {
				if gcInstallStackBarrier(gp, frame) {
					barrierOffset *= 2
					nextBarrier = sp + barrierOffset
				}
			} else if gcphase == _GCmarktermination {
				// We just scanned a frame containing
				// a return to a stack barrier. Since
				// this frame never returned, we can
				// stop scanning.
				return false
			}
		}
		n++

		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	gcUnlockStackBarriers(gp)
	if gcphase == _GCmark {
		// gp may have added itself to the rescan list between
		// when GC started and now. It's clean now, so remove
		// it. This isn't safe during mark termination because
		// mark termination is consuming this list, but it's
		// also not necessary.
		dequeueRescan(gp)
	}
	gp.gcscanvalid = true
}
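
// Barrier spacing, informally: barrierOffset starts at
// firstStackBarrierOffset and doubles after each installed barrier, so
// barriers land at roughly sp+1KB, sp+2KB, sp+4KB, ... (assuming the
// usual 1KB initial offset), i.e. O(log frames) barriers per stack.
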
// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
	}
}

// queueRescan adds gp to the stack rescan list and clears
// gp.gcscanvalid. The caller must own gp and ensure that gp isn't
// already on the rescan list.
func queueRescan(gp *g) {
	if gcphase == _GCoff {
		gp.gcscanvalid = false
		return
	}
	if gp.gcRescan != -1 {
		throw("g already on rescan list")
	}

	lock(&work.rescan.lock)
	gp.gcscanvalid = false

	// Recheck gcphase under the lock in case there was a phase change.
	if gcphase == _GCoff {
		unlock(&work.rescan.lock)
		return
	}
	if len(work.rescan.list) == cap(work.rescan.list) {
		throw("rescan list overflow")
	}
	n := len(work.rescan.list)
	gp.gcRescan = int32(n)
	work.rescan.list = work.rescan.list[:n+1]
	work.rescan.list[n].set(gp)
	unlock(&work.rescan.lock)
}

// dequeueRescan removes gp from the stack rescan list, if gp is on
// the rescan list. The caller must own gp.
func dequeueRescan(gp *g) {
	if gp.gcRescan == -1 {
		return
	}
	if gcphase == _GCoff {
		gp.gcRescan = -1
		return
	}

	lock(&work.rescan.lock)
	if work.rescan.list[gp.gcRescan].ptr() != gp {
		throw("bad dequeueRescan")
	}
	// Careful: gp may itself be the last G on the list.
	last := work.rescan.list[len(work.rescan.list)-1]
	work.rescan.list[gp.gcRescan] = last
	last.ptr().gcRescan = gp.gcRescan
	gp.gcRescan = -1
	work.rescan.list = work.rescan.list[:len(work.rescan.list)-1]
	unlock(&work.rescan.lock)
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainNoBlock
	gcDrainFlushBgCredit

	// gcDrainBlock means neither gcDrainUntilPreempt nor
	// gcDrainNoBlock. It is the default, but callers should use
	// the constant for documentation purposes.
	gcDrainBlock gcDrainFlags = 0
)
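
// Note the flag interplay documented below: gcDrainUntilPreempt implies
// gcDrainNoBlock-style behavior, so the blocking mode (gcDrainBlock,
// the zero value) is only selected when neither flag is set; blocking
// drains are only safe when the caller can afford to wait on the work
// barrier, e.g. during STW mark termination.
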
// gcDrain scans roots and objects in work buffers, blackening grey
// objects until all roots and work buffers have been drained.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set. This implies gcDrainNoBlock.
//
// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
// unable to get more work. Otherwise, it will block until all
// blocking calls are blocked in gcDrain.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg()
	preemptible := flags&gcDrainUntilPreempt != 0
	blocking := flags&(gcDrainUntilPreempt|gcDrainNoBlock) == 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		for blocking || !gp.preempt {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
		}
	}

	initScanWork := gcw.scanWork

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		var b uintptr
		if blocking {
			b = gcw.get()
		} else {
			b = gcw.tryGetFast()
			if b == 0 {
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// work barrier reached or tryGet failed.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			gcw.scanWork = 0
		}
	}

	// In blocking mode, write barriers are not allowed after this
	// point because we must preserve the condition that the work
	// buffers are empty.

	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}
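
// Credit batching, informally: gcCreditSlack bounds how much locally
// accumulated scan work can go unreported, so the atomics above run
// once per ~gcCreditSlack units rather than once per scanned object.
// The initScanWork subtraction keeps pre-existing credit on the gcw
// from being flushed to assists as if this drain had earned it.
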
// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least scanWork units of work,
// but may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//go:nowritebarrier
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3]);
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
		}

		if b == 0 {
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				obj := *(*uintptr)(unsafe.Pointer(b + i))
				if obj != 0 && arena_start <= obj && obj < arena_used {
					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
						greyobject(obj, b, i, hbits, span, gcw, objIndex)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
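
// ptrmask layout, informally: one bit per pointer-sized word, eight
// words per mask byte, so the word at byte offset i is described by
// bit (i/PtrSize)%8 of mask byte i/(PtrSize*8); that is exactly the
// indexing in the loop above, which also lets an all-zero mask byte
// skip eight words at a time.
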
// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object; scanobject consults
// the GC bitmap for the pointer mask and the spans for the size of the
// object.
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Note that arena_used may change concurrently during
	// scanobject and hence scanobject may encounter a pointer to
	// a newly allocated heap object that is *not* in
	// [start,used). It will not mark this object; however, we
	// know that it was just installed by a mutator, which means
	// that mutator will execute a write barrier and take care of
	// marking it. This is even more pronounced on relaxed memory
	// architectures since we access arena_used without barriers
	// or synchronization, but the same logic applies.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	// Find bits of the beginning of the object.
	// b must point to the beginning of a heap object, so
	// we can get its bits and span directly.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && !hbits.morePointers() {
			break // no more pointers in this object
		}
		if !hbits.isPointer() {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap and not back at the current object.
		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
			// Mark the object.
			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, hbits, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
		if gcphase == _GCmarktermination || gcBlackenPromptly {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}
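
// Note on shade: unlike scanobject, it may be handed an interior
// pointer (it is reached from the write barrier slow path), so it goes
// through heapBitsForObject, which resolves b to the containing
// object's base before greying it.
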
// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			throw("checkmark found unmarked object")
		}
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
		atomic.Or8(mbits.bytep, mbits.mask)
		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if !hbits.hasPointers(span.elemsize) {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not in the Go heap\n")
		return
	}
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := h_spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
	skipped := false
	for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}

// Checkmarking

// To help debug the concurrent GC we remark with the world
// stopped, ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
//	01: scalar  not marked
//	10: pointer not marked
//	11: pointer marked
//	00: scalar  marked
// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

//go:nowritebarrier
func initCheckmarks() {
	useCheckmark = true
	for _, s := range work.spans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range work.spans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}