github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024 // 64MB worth of spans

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// idleCheckThreshold specifies how many units of work to do
	// between run queue checks in an idle worker. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	idleCheckThreshold = 100000
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The caller must have called gcCopySpans().
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	if gcphase == _GCmarktermination {
		work.nFlushCacheRoots = int(gomaxprocs)
	} else {
		work.nFlushCacheRoots = 0
	}

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	if !work.markrootDone {
		for _, datap := range activeModules() {
			nDataRoots := nBlocks(datap.edata - datap.data)
			if nDataRoots > work.nDataRoots {
				work.nDataRoots = nDataRoots
			}
		}

		for _, datap := range activeModules() {
			nBSSRoots := nBlocks(datap.ebss - datap.bss)
			if nBSSRoots > work.nBSSRoots {
				work.nBSSRoots = nBSSRoots
			}
		}
	}

	if !work.markrootDone {
		// On the first markroot, we need to scan span roots.
		// In concurrent GC, this happens during concurrent
		// mark and we depend on addfinalizer to ensure the
		// above invariants for objects that get finalizers
		// after concurrent mark. In STW GC, this will happen
		// during mark termination.
		//
		// We're only interested in scanning the in-use spans,
		// which will all be swept at this point. More spans
		// may be added to this list during concurrent GC, but
		// we only care about spans that were allocated before
		// this mark phase.
		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()

		// On the first markroot, we need to scan all Gs. Gs
		// may be created after this point, but it's okay that
		// we ignore them because they begin life without any
		// roots, so there's nothing to scan, and any roots
		// they create during the concurrent phase will be
		// scanned during mark termination. During mark
		// termination, allglen isn't changing, so we'll scan
		// all Gs.
		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
	} else {
		// We've already scanned span roots and kept the scan
		// up-to-date during concurrent mark.
		work.nSpanRoots = 0

		// The hybrid barrier ensures that stacks can't
		// contain pointers to unmarked objects, so on the
		// second markroot, there's no need to scan stacks.
		work.nStackRoots = 0

		if debug.gcrescanstacks > 0 {
			// Scan stacks anyway for debugging.
			work.nStackRoots = int(atomic.Loaduintptr(&allglen))
		}
	}

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 {
		for i := 0; i < len(allgs); i++ {
			gp = allgs[i]
			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
				goto fail
			}
		}
	} else {
		for i := 0; i < work.nStackRoots; i++ {
			gp = allgs[i]
			if !gp.gcscandone {
				goto fail
			}
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone,
		"gcscanvalid", gp.gcscanvalid)
	unlock(&allglock) // Avoid self-deadlock with traceback.
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
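	// Added note (derived from the base computations above): the fixed
	// roots (finalizers, free G stacks) occupy indexes [0, fixedRootCount);
	// flush-cache, data, BSS, span, and stack roots follow in that order.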
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		// Only do this once per GC cycle since we don't call
		// queuefinalizer during marking.
		if work.markrootDone {
			break
		}
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
		}

	case i == fixedRootFreeGStacks:
		// Only do this once per GC cycle; preferably
		// concurrently.
		if !work.markrootDone {
			// Switch to the system stack so we can call
			// stackfree.
			systemstack(markrootFreeGStacks)
		}

	case baseSpans <= i && i < baseStacks:
		// mark MSpan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = "garbage collection scan"
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw)
}

// markrootFreeGStacks frees stacks of dead Gs.
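// Added note: markroot runs this on the system stack (see the
// fixedRootFreeGStacks case above) so that stack freeing is safe.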
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
//
//TODO go:nowritebarrier
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gflock)
	list := sched.gfreeStack
	sched.gfreeStack = nil
	unlock(&sched.gflock)
	if list == nil {
		return
	}

	// Free stacks.
	tail := list
	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		tail = gp
	}

	// Put Gs back on the free list.
	lock(&sched.gflock)
	tail.schedlink.set(sched.gfreeNoStack)
	sched.gfreeNoStack = list
	unlock(&sched.gflock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	if work.markrootDone {
		throw("markrootSpans during second markroot")
	}

	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range spans {
		if s.state != mSpanInUse {
			continue
		}
		if !useCheckmark && s.sweepgen != sg {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object; find the object's beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
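			// Added note: scanning spf.fn with a one-pointer mask
			// keeps the finalizer closure itself alive (invariant 2 above).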
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
		}

		unlock(&s.speciallock)
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	if trace.enabled {
		traceGCMarkAssistStart()
	}

retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if trace.enabled {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	// Perform assist work.
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if trace.enabled {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "GC assist marking"

	// Drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)
	// If we are near the end of the mark phase,
	// dispose of the gcw.
	if gcBlackenPromptly {
		gcw.dispose()
	}

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc,
			"gcBlackenPromptly=", gcBlackenPromptly)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	injectglist(work.assistQueue.head.ptr())
	work.assistQueue.head.set(nil)
	work.assistQueue.tail.set(nil)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist returns whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
	if oldHead == 0 {
		work.assistQueue.head.set(gp)
	} else {
		oldTail.ptr().schedlink.set(gp)
	}
	work.assistQueue.tail.set(gp)
	gp.schedlink.set(nil)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.head = oldHead
		work.assistQueue.tail = oldTail
		if oldTail != 0 {
			oldTail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.head == 0 {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	gp := work.assistQueue.head.ptr()
	for gp != nil && scanBytes > 0 {
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			xgp := gp
			gp = gp.schedlink.ptr()
			// It's important that we *not* put xgp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(xgp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			xgp := gp
			gp = gp.schedlink.ptr()
			if gp == nil {
				// gp is the only assist in the queue.
				gp = xgp
			} else {
				xgp.schedlink = 0
				work.assistQueue.tail.ptr().schedlink.set(xgp)
				work.assistQueue.tail.set(xgp)
			}
			break
		}
	}
	work.assistQueue.head.set(gp)
	if gp == nil {
		work.assistQueue.tail.set(nil)
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Shrink the stack if not much of it is being used. During
	// concurrent GC, we can do this during concurrent mark.
	if !work.markrootDone {
		shrinkstack(gp)
	}

	// Scan the stack.
	var cache pcvalueCache
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &cache, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	gp.gcscanvalid = true
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {

	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
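	// Added note (from the minsize check below): a frame no larger than
	// the architecture's minimum frame size (or the SP alignment slot on
	// arm64) holds no locals to scan.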
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainNoBlock
	gcDrainFlushBgCredit
	gcDrainIdle

	// gcDrainBlock means neither gcDrainUntilPreempt nor
	// gcDrainNoBlock. It is the default, but callers should use
	// the constant for documentation purposes.
	gcDrainBlock gcDrainFlags = 0
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until all roots and work buffers have been drained.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set. This implies gcDrainNoBlock.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do. This implies gcDrainNoBlock.
//
// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
// unable to get more work. Otherwise, it will block until all
// blocking calls are blocked in gcDrain.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainNoBlock) == 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork
	// idleCheck is the scan work at which to perform the next
	// idle check with the scheduler.
	idleCheck := initScanWork + idleCheckThreshold

	// Drain root marking jobs.
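	// Added note: each iteration below claims the next unscanned root
	// by atomically advancing work.markrootNext.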
	if work.markrootNext < work.markrootJobs {
		for !(preemptible && gp.preempt) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if idle && pollWork() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		var b uintptr
		if blocking {
			b = gcw.get()
		} else {
			b = gcw.tryGetFast()
			if b == 0 {
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// work barrier reached or tryGet failed.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			idleCheck -= gcw.scanWork
			gcw.scanWork = 0

			if idle && idleCheck <= 0 {
				idleCheck += idleCheckThreshold
				if pollWork() {
					break
				}
			}
		}
	}

	// In blocking mode, write barriers are not allowed after this
	// point because we must preserve the condition that the work
	// buffers are empty.

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3];
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				obj := *(*uintptr)(unsafe.Pointer(b + i))
				if obj != 0 && arena_start <= obj && obj < arena_used {
					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
						greyobject(obj, b, i, hbits, span, gcw, objIndex)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Note that arena_used may change concurrently during
	// scanobject and hence scanobject may encounter a pointer to
	// a newly allocated heap object that is *not* in
	// [start,used). It will not mark this object; however, we
	// know that it was just installed by a mutator, which means
	// that mutator will execute a write barrier and take care of
	// marking it. This is even more pronounced on relaxed memory
	// architectures since we access arena_used without barriers
	// or synchronization, but the same logic applies.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if !hbits.hasPointers(n) {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap and not back at the current object.
		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
			// Mark the object.
			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, hbits, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
		if gcphase == _GCmarktermination || gcBlackenPromptly {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			getg().m.traceback = 2
			throw("checkmark found unmarked object")
		}
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
		atomic.Or8(mbits.bytep, mbits.mask)
		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if !hbits.hasPointers(span.elemsize) {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not in the Go heap\n")
		return
	}
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := mheap_.spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, " s.state=")
	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
		print(mSpanStateNames[s.state], "\n")
	} else {
		print("unknown(", s.state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state == _MSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
	if gcBlackenPromptly {
		// There shouldn't be anything in the work queue, but
		// we still need to flush stats.
		gcw.dispose()
	}
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, hbits, span, objIndex := heapBitsForObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, hbits, span, gcw, objIndex)
		if gcBlackenPromptly {
			gcw.dispose()
		}
	}
}

// Checkmarking

// To help debug the concurrent GC we remark with the world
// stopped ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar not marked
// 10: pointer not marked
// 11: pointer marked
// 00: scalar marked
// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

//go:nowritebarrier
func initCheckmarks() {
	useCheckmark = true
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}