github.com/rafaeltorres324/go/src@v0.0.0-20210519164414-9fdf653a9838/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	work.nFlushCacheRoots = 0

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the time the
	// mark phase started, i.e. markArenas) for in-use spans which have specials.
	//
	// Break up the work into arenas, and further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
	// is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	for i := 0; i < work.nStackRoots; i++ {
		gp = allgs[i]
		if !gp.gcscandone {
			goto fail
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone)
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

				// Mark everything that can be reached from
				// the object (but *not* the object itself or
				// we'll never collect it).
				scanobject(p, gcw)

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptable system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = waitReasonGCAssistMarking

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace all defer arguments.
	tracebackdefers(gp, scanframe, nil)

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// tracebackdefers above does not scan the func value, which could
			// be a stack allocated closure. See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defer records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		t := obj.typ
		if t == nil {
			// We've already scanned this object.
			continue
		}
		obj.setType(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := t.gcdata
		var s *mspan
		if t.kind&kindGCProg != 0 {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(t.ptrdata, gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, t.ptrdata, gcdata, gcw, &state)
		} else {
			scanblock(b, t.ptrdata, gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.typ == nil { // reachable
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV1
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if frame.arglen != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := getStackMap(frame, &state.cache, false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of type", obj.typ.string())
			}
			state.addObject(ptr, obj.typ)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible or if someone wants to STW.
		for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	// Stop if we're preemptible or if someone wants to STW.
	for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3];
		//  }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		if bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / sys.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += sys.PtrSize {
		if ptrmask != nil {
			word := i / sys.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(sys.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += sys.PtrSize*8 - sys.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}