github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the time the
	// mark phase started, i.e. markArenas) for in-use spans which have specials.
	//
	// Break up the work into arenas, and further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
	// is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.stackRoots = allGsSnapshot()
	work.nStackRoots = len(work.stackRoots)

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)

	// Calculate base indexes of each root type
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have scanned.
	// Since we don't care about newer Gs (see comment in
	// gcMarkRootPrepare), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work credit produced by the operation.
// If flushBgCredit is true, then that credit is also flushed
// to the background credit pool.
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	var workDone int64
	var workCounter *atomic.Int64
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// the rest is scanning goroutine stacks
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casGToWaiting(userG, _Grunning, waitReasonGarbageCollectionScan)
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	if workCounter != nil && workDone != 0 {
		workCounter.Add(workDone)
		if flushBgCredit {
			gcFlushBgCredit(workDone)
		}
	}
	return workDone
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
	return int64(n)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

				// Mark everything that can be reached from
				// the object (but *not* the object itself or
				// we'll never collect it).
				if !s.spanclass.noscan() {
					scanobject(p, gcw)
				}

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	if go119MemoryLimitSupport && gcCPULimiter.limiting() {
		// If the CPU limiter is enabled, intentionally don't
		// assist to reduce the amount of CPU time spent in the GC.
		if traced {
			traceGCMarkAssistDone()
		}
		return
	}
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := gcController.bgScanCredit.Load()
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		gcController.bgScanCredit.Add(-stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	//
	// Limiter event tracking might be disabled if we end up here
	// while on a mark worker.
	startTime := nanotime()
	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casGToWaiting(gp, _Grunning, waitReasonGCAssistMarking)

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	now := nanotime()
	duration := now - startTime
	pp := gp.m.p.ptr()
	pp.gcAssistTime += duration
	if trackLimiterEvent {
		pp.limiterEvent.stop(limiterEventMarkAssist, now)
	}
	if pp.gcAssistTime > gcAssistTimeSlack {
		gcController.assistTime.Add(pp.gcAssistTime)
		gcCPULimiter.update(now)
		pp.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if gcController.bgScanCredit.Load() > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		gcController.bgScanCredit.Add(scanWork)
		return
	}

	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		gcController.bgScanCredit.Add(scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background credit produced
// by this function should be flushed by its caller. scanstack itself can't
// safely flush because it may result in trying to wake up a goroutine that
// was just scanned, resulting in a self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// scannedSize is the amount of work we'll be reporting.
	//
	// It is less than the allocated size (which is hi-lo).
	var sp uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
	} else {
		sp = gp.sched.sp
	}
	scannedSize := gp.stack.hi - sp

	// Keep statistics for initial stack size calculation.
	// Note that this accumulates the scanned size, not the allocated size.
	p := getg().m.p.ptr()
	p.scannedStackSize += uint64(scannedSize)
	p.scannedStacks++

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack allocated closure.
			// See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defer records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := r.gcdata()
		var s *mspan
		if r.useGCProg() {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(r.ptrdata(), gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
		} else {
			scanblock(b, r.ptrdata(), gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil { // reachable
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(scannedSize)
}

// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if n := frame.argBytes(); n != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, n, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := frame.getStackMap(&state.cache, false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible or if someone wants to STW.
		for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	// Stop if we're preemptible or if someone wants to STW.
	for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	// In addition to backing out because of a preemption, back out
	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					workFlushed += markroot(gcw, job, false)
					continue
				}
			}
			// No heap or root jobs.
			break
		}

		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.heapScanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanobject of a noscan object")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	hbits := heapBitsForAddr(b, n)
	var scanSize uintptr
	for {
		var addr uintptr
		if hbits, addr = hbits.nextFast(); addr == 0 {
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork. TODO: is there a better metric,
		// now that we can skip scalar portions pretty efficiently?
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
				greyobject(obj, b, addr-b, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
}

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)
	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj, size uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}