github.com/AESNooper/go/src@v0.0.0-20220218095104-b56a4ab1bbbb/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"internal/goarch"
	"internal/goexperiment"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the time the
	// mark phase started, i.e. markArenas) for in-use spans which have specials.
	//
	// Break up the work into arenas, and further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
	// is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.stackRoots = allGsSnapshot()
	work.nStackRoots = len(work.stackRoots)

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)

	// Calculate base indexes of each root type
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have scanned.
	// Since we don't care about newer Gs (see comment in
	// gcMarkRootPrepare), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work credit produced by the operation.
// If flushBgCredit is true, then that credit is also flushed
// to the background credit pool.
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	var workDone int64
	var workCounter *atomic.Int64
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// the rest is scanning goroutine stacks
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	if goexperiment.PacerRedesign {
		if workCounter != nil && workDone != 0 {
			workCounter.Add(workDone)
			if flushBgCredit {
				gcFlushBgCredit(workDone)
			}
		}
	}
	return workDone
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
	return int64(n)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object; find the object's beginning.
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

				// Mark everything that can be reached from
				// the object (but *not* the object itself or
				// we'll never collect it).
				scanobject(p, gcw)

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = waitReasonGCAssistMarking

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// For goexperiment.PacerRedesign:
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background credit produced
// by this function should be flushed by its caller. scanstack itself can't
// safely flush because it may result in trying to wake up a goroutine that
// was just scanned, resulting in a self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// stackSize is the amount of work we'll be reporting.
	//
	// We report the total stack size, more than we scan,
	// because this number needs to line up with gcControllerState's
	// stackScan and scannableStackSize fields.
	//
	// See the documentation on those fields for more information.
	stackSize := gp.stack.hi - gp.stack.lo

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack allocated closure.
			// See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defer records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at ", hex(state.stack.lo+uintptr(obj.off)), " of size ", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := r.gcdata()
		var s *mspan
		if r.useGCProg() {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(r.ptrdata(), gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
		} else {
			scanblock(b, r.ptrdata(), gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil { // reachable
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(stackSize)
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if frame.arglen != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := getStackMap(frame, &state.cache, false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible or if someone wants to STW.
		for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	// Stop if we're preemptible or if someone wants to STW.
	for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					work := markroot(gcw, job, false)
					if goexperiment.PacerRedesign {
						workFlushed += work
					}
					continue
				}
			}
			// No heap or root jobs.
			break
		}

		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.heapScanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		if bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(i)
}

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)
	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	if !goexperiment.PacerRedesign {
		// The old pacer counts newly allocated memory toward
		// heapScanWork because heapScan is continuously updated
		// throughout the GC cycle with newly allocated memory. However,
		// these objects are never actually scanned, so we need
		// to account for them in heapScanWork here, "faking" their work.
		// Otherwise the pacer will think it's always behind, potentially
		// by a large margin.
		//
		// The new pacer doesn't care about this because it ceases to update
		// heapScan once a GC cycle starts, effectively snapshotting it.
		gcw.heapScanWork += int64(scanSize)
	}
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}