github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/mgcmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024 // 64MB worth of spans

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The caller must have called gcCopySpans().
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	work.nFlushCacheRoots = 0

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're only interested in scanning the in-use spans,
	// which will all be swept at this point. More spans
	// may be added to this list during concurrent GC, but
	// we only care about spans that were allocated before
	// this mark phase.
	work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be scanned during mark
	// termination.
	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}
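
// Note added for this annotated copy (not part of the upstream source): the
// counts computed above define one flat job index space that markroot, further
// below, decodes by range. As a hypothetical example, with 2 fixed roots,
// 2 flush-cache roots, 3 data blocks, 1 BSS block, 4 span shards, and 10
// stacks, the jobs would be numbered
//
//	[0, 2)   fixed roots (finalizers, free G stacks)
//	[2, 4)   flush mcaches
//	[4, 7)   data roots (nBlocks rounds up: ceil(bytes/rootBlockBytes))
//	[7, 8)   BSS roots
//	[8, 12)  span shards
//	[12, 22) goroutine stacks
//
// giving markrootJobs = 22; markroot recovers the job kind from the index
// alone using the base offsets it recomputes on each call.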

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	for i := 0; i < work.nStackRoots; i++ {
		gp = allgs[i]
		if !gp.gcscandone {
			goto fail
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone,
		"gcscanvalid", gp.gcscanvalid)
	unlock(&allglock) // Avoid self-deadlock with traceback.
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
//
//TODO go:nowritebarrier
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range spans {
		if s.state != mSpanInUse {
			continue
		}
		// Check that this span was swept (it may be cached or uncached).
		if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
		}

		unlock(&s.speciallock)
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = waitReasonGCAssistMarking

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// Shrink the stack if not much of it is being used.
	shrinkstack(gp)

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.
	tracebackdefers(gp, scanframe, nil)
	if gp._panic != nil {
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)))
	}

	// Find and scan all reachable stack objects.
	state.buildIndex()
	for {
		p := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		t := obj.typ
		if t == nil {
			// We've already scanned this object.
			continue
		}
		obj.setType(nil) // Don't scan it again.
		if stackTraceDebug {
			println(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
		}
		gcdata := t.gcdata
		var s *mspan
		if t.kind&kindGCProg != 0 {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(t.ptrdata, gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		scanblock(state.stack.lo+uintptr(obj.off), t.ptrdata, gcdata, gcw, &state)

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for _, obj := range x.obj[:x.nobj] {
				if obj.typ == nil { // reachable
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}

	gp.gcscanvalid = true
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	locals, args, objs := getStackMap(frame, &state.cache, false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of type", obj.typ.string())
			}
			state.addObject(ptr, obj.typ)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		for !(preemptible && gp.preempt) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3];
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			getg().m.traceback = 2
			throw("checkmark found unmarked object")
		}
		hbits := heapBitsForAddr(obj)
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}
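
// Note added for this annotated copy (not part of the upstream source): a
// worked example of the oblet arithmetic in scanobject above, assuming a
// hypothetical 300 KiB object whose base is b. The first call (b == s.base())
// enqueues the oblets b+128K and b+256K and then scans [b, b+128K), because
// n is clamped to maxObletBytes. The call for b+128K scans [b+128K, b+256K),
// and the call for b+256K scans the remaining 44 KiB tail, since there
// n = s.base() + s.elemsize - b already falls below the clamp.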

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
		print(mSpanStateNames[s.state], "\n")
	} else {
		print("unknown(", s.state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}

// Checkmarking

// To help debug the concurrent GC we remark with the world
// stopped, ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar not marked
// 10: pointer not marked
// 11: pointer marked
// 00: scalar marked
// XORing with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.
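
// Note added for this annotated copy (not part of the upstream source): with
// the encoding above, toggling a checkmark is a single XOR with 01. A pointer
// word starts as 10 (pointer, not marked); 10 XOR 01 = 11 (pointer, marked),
// and XORing again restores 10. A scalar word starts as 01 and flips to 00.
// The high (pointer/scalar) bit is unchanged in both cases.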

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

//go:nowritebarrier
func initCheckmarks() {
	useCheckmark = true
	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}