// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFlushCaches
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024 // 64MB worth of spans
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The caller must have called gcCopySpans().
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	if !work.markrootDone {
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			nDataRoots := nBlocks(datap.edata - datap.data)
			if nDataRoots > work.nDataRoots {
				work.nDataRoots = nDataRoots
			}
		}

		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			nBSSRoots := nBlocks(datap.ebss - datap.bss)
			if nBSSRoots > work.nBSSRoots {
				work.nBSSRoots = nBSSRoots
			}
		}
	}

	if !work.markrootDone {
		// On the first markroot, we need to scan span roots.
		// In concurrent GC, this happens during concurrent
		// mark and we depend on addfinalizer to ensure the
		// above invariants for objects that get finalizers
		// after concurrent mark. In STW GC, this will happen
		// during mark termination.
		work.nSpanRoots = (len(work.spans) + rootBlockSpans - 1) / rootBlockSpans

		// On the first markroot, we need to scan all Gs. Gs
		// may be created after this point, but it's okay that
		// we ignore them because they begin life without any
		// roots, so there's nothing to scan, and any roots
		// they create during the concurrent phase will be
		// scanned during mark termination. During mark
		// termination, allglen isn't changing, so we'll scan
		// all Gs.
		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
		work.nRescanRoots = 0
	} else {
		// We've already scanned span roots and kept the scan
		// up-to-date during concurrent mark.
		work.nSpanRoots = 0

		// On the second pass of markroot, we're just scanning
		// dirty stacks. It's safe to access rescan since the
		// world is stopped.
		work.nStackRoots = 0
		work.nRescanRoots = len(work.rescan.list)
	}

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
}
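
// Illustrative sketch, not part of the original source: how the root
// jobs prepared above are indexed. With a hypothetical 1 MiB data
// segment, nBlocks(1<<20) == (1<<20 + rootBlockBytes - 1) / rootBlockBytes == 4,
// i.e. four 256 KiB blocks. markroot (below) then interprets job
// indices as
//
//	[0, fixedRootCount)               fixed roots (finalizers, mcache flush, dead G stacks)
//	[baseData, baseData+nDataRoots)   data segment blocks
//	[baseBSS, baseBSS+nBSSRoots)      BSS segment blocks
//	[baseSpans, baseSpans+nSpanRoots) span shards (finalizer specials)
//	[baseStacks, end)                 goroutine stacks and the rescan list
//
// The segment size is made up; the layout matches the base* offsets
// computed at the top of markroot.
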
// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	if gcphase == _GCmarktermination {
		for i := 0; i < len(allgs); i++ {
			gp = allgs[i]
			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
				goto fail
			}
		}
	} else {
		for i := 0; i < work.nStackRoots; i++ {
			gp = allgs[i]
			if !gp.gcscandone {
				goto fail
			}
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone,
		"gcscanvalid", gp.gcscanvalid)
	unlock(&allglock) // Avoid self-deadlock with traceback.
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseData := uint32(fixedRootCount)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	baseRescan := baseStacks + uint32(work.nStackRoots)
	end := baseRescan + uint32(work.nRescanRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseData <= i && i < baseBSS:
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
		}

	case i == fixedRootFlushCaches:
		if gcphase == _GCmarktermination { // Do not flush mcaches during concurrent phase.
			flushallmcaches()
		}

	case i == fixedRootFreeGStacks:
		// Only do this once per GC cycle; preferably
		// concurrently.
		if !work.markrootDone {
			// Switch to the system stack so we can call
			// stackfree.
			systemstack(markrootFreeGStacks)
		}

	case baseSpans <= i && i < baseStacks:
		// mark MSpan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < baseRescan {
			gp = allgs[i-baseStacks]
		} else if baseRescan <= i && i < end {
			gp = work.rescan.list[i-baseRescan].ptr()
		} else {
			throw("markroot: bad index")
		}

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		if gcphase != _GCmarktermination && gp.startpc == gcBgMarkWorkerPC && readgstatus(gp) != _Gdead {
			// GC background workers may be
			// non-preemptible, so we may deadlock if we
			// try to scan them during a concurrent phase.
			// They also have tiny stacks, so just ignore
			// them until mark termination.
			gp.gcscandone = true
			queueRescan(gp)
			break
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is mark
			// termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = "garbage collection scan"
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw)
}
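
// Illustrative sketch, not part of the original source: the shard
// arithmetic in markrootBlock with hypothetical numbers. On a 64-bit
// system (sys.PtrSize == 8) one ptrmask byte covers 8 words, so each
// 256 KiB shard consumes rootBlockBytes/(8*8) == 4096 mask bytes.
// For shard == 2 of a 600 KiB data segment starting at b0:
//
//	b       = b0 + 2*262144       // start of the third shard
//	ptrmask = ptrmask0 + 2*4096   // mask bytes for that shard
//	n       = 614400 - 2*262144   // 90112 bytes; the last shard is short
//
// The segment size is made up; the expressions are the ones used above.
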
// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
//
//TODO go:nowritebarrier
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gflock)
	list := sched.gfreeStack
	sched.gfreeStack = nil
	unlock(&sched.gflock)
	if list == nil {
		return
	}

	// Free stacks.
	tail := list
	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		tail = gp
	}

	// Put Gs back on the free list.
	lock(&sched.gflock)
	tail.schedlink.set(sched.gfreeNoStack)
	sched.gfreeNoStack = list
	unlock(&sched.gflock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	if work.markrootDone {
		throw("markrootSpans during second markroot")
	}

	sg := mheap_.sweepgen
	startSpan := shard * rootBlockSpans
	endSpan := (shard + 1) * rootBlockSpans
	if endSpan > len(work.spans) {
		endSpan = len(work.spans)
	}
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range work.spans[startSpan:endSpan] {
		if s.state != mSpanInUse {
			continue
		}
		if !useCheckmark && s.sweepgen != sg {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
		}

		unlock(&s.speciallock)
	}
}
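
// Illustrative sketch, not part of the original source: how
// markrootSpans above recovers an object's base address from a
// finalizer special. If a span holds 48-byte objects (s.elemsize == 48)
// and a finalizer was attached to an inner byte at offset 112 from
// s.base(), then
//
//	p := s.base() + uintptr(112)/48*48 // == s.base() + 96, the object's start
//
// The offset and element size are hypothetical; the rounding-down
// expression is the one used in the specials loop above.
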
// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
//go:nowritebarrier
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}

retry:
	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			return
		}
	}

	// Perform assist work
	completed := false
	systemstack(func() {
		if atomic.Load(&gcBlackenEnabled) == 0 {
			// The gcBlackenEnabled check in malloc races with the
			// store that clears it but an atomic check in every malloc
			// would be a performance hit.
			// Instead we recheck it here on the non-preemptable system
			// stack to determine if we should perform an assist.

			// GC is done, so ignore any remaining debt.
			gp.gcAssistBytes = 0
			return
		}
		// Track time spent in this assist. Since we're on the
		// system stack, this is non-preemptible, so we can
		// just measure start and end time.
		startTime := nanotime()

		decnwait := atomic.Xadd(&work.nwait, -1)
		if decnwait == work.nproc {
			println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
			throw("nwait > work.nprocs")
		}

		// drain own cached work first in the hopes that it
		// will be more cache friendly.
		gcw := &getg().m.p.ptr().gcw
		workDone := gcDrainN(gcw, scanWork)
		// If we are near the end of the mark phase
		// dispose of the gcw.
		if gcBlackenPromptly {
			gcw.dispose()
		}

		// Record that we did this much scan work.
		//
		// Back out the number of bytes of assist credit that
		// this scan work counts for. The "1+" is a poor man's
		// round-up, to ensure this adds credit even if
		// assistBytesPerWork is very low.
		gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

		// If this is the last worker and we ran out of work,
		// signal a completion point.
		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait > work.nproc {
			println("runtime: work.nwait=", incnwait,
				"work.nproc=", work.nproc,
				"gcBlackenPromptly=", gcBlackenPromptly)
			throw("work.nwait > work.nproc")
		}

		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// This has reached a background completion
			// point.
			completed = true
		}
		duration := nanotime() - startTime
		_p_ := gp.m.p.ptr()
		_p_.gcAssistTime += duration
		if _p_.gcAssistTime > gcAssistTimeSlack {
			atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
			_p_.gcAssistTime = 0
		}
	})

	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		lock(&work.assistQueue.lock)

		// If the GC cycle is over, just return. This is the
		// likely path if we completed above. We do this
		// under the lock to prevent a GC cycle from ending
		// between this check and queuing the assist.
		if atomic.Load(&gcBlackenEnabled) == 0 {
			unlock(&work.assistQueue.lock)
			return
		}

		oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
		if oldHead == 0 {
			work.assistQueue.head.set(gp)
		} else {
			oldTail.ptr().schedlink.set(gp)
		}
		work.assistQueue.tail.set(gp)
		gp.schedlink.set(nil)
		// Recheck for background credit now that this G is in
		// the queue, but can still back out. This avoids a
		// race in case background marking has flushed more
		// credit since we checked above.
		if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
			work.assistQueue.head = oldHead
			work.assistQueue.tail = oldTail
			if oldTail != 0 {
				oldTail.ptr().schedlink.set(nil)
			}
			unlock(&work.assistQueue.lock)
			goto retry
		}
		// Park for real.
		goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlock, 2)

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
}
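
// Illustrative sketch, not part of the original source: the debt
// arithmetic at the top of gcAssistAlloc, with hypothetical pacer
// values. If a goroutine has gcAssistBytes == -4096 and the controller
// has set assistWorkPerByte == 0.5, then
//
//	debtBytes = 4096
//	scanWork  = int64(0.5 * 4096) // 2048 units of scan work
//
// If that fell below gcOverAssistWork, scanWork would be raised to
// gcOverAssistWork and debtBytes recomputed via assistBytesPerWork, so
// the extra work is banked as credit for future allocations. The
// numbers are made up; only the formulas mirror the code above.
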
// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	injectglist(work.assistQueue.head.ptr())
	work.assistQueue.head.set(nil)
	work.assistQueue.tail.set(nil)
	unlock(&work.assistQueue.lock)
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.head == 0 {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	gp := work.assistQueue.head.ptr()
	for gp != nil && scanBytes > 0 {
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			xgp := gp
			gp = gp.schedlink.ptr()
			// It's important that we *not* put xgp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(xgp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			xgp := gp
			gp = gp.schedlink.ptr()
			if gp == nil {
				// gp is the only assist in the queue.
				gp = xgp
			} else {
				xgp.schedlink = 0
				work.assistQueue.tail.ptr().schedlink.set(xgp)
				work.assistQueue.tail.set(xgp)
			}
			break
		}
	}
	work.assistQueue.head.set(gp)
	if gp == nil {
		work.assistQueue.tail.set(nil)
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}
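
// Illustrative sketch, not part of the original source: how
// gcFlushBgCredit above distributes credit, with hypothetical numbers.
// With assistBytesPerWork == 2.0, a flush of scanWork == 1000 becomes
// scanBytes == 2000. If the queue holds two parked assists with debts
// (gcAssistBytes) of -1500 and -3000:
//
//	first:  2000 + (-1500) >= 0, so its debt is cleared, it is readied,
//	        and scanBytes drops to 500
//	second: 500 + (-3000) < 0, so it absorbs the remaining 500 (debt
//	        becomes -2500), moves to the back of the queue, and the loop
//	        stops with nothing left over for bgScanCredit
//
// The debts and conversion factor are made up; the sign handling is the
// one in the loop above.
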
// scanstack scans gp's stack, greying all pointers found on the stack.
//
// During mark phase, it also installs stack barriers while traversing
// gp's stack. During mark termination, it stops scanning when it
// reaches an unhit stack barrier.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Shrink the stack if not much of it is being used. During
	// concurrent GC, we can do this during concurrent mark.
	if !work.markrootDone {
		shrinkstack(gp)
	}

	// Prepare for stack barrier insertion/removal.
	var sp, barrierOffset, nextBarrier uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
	} else {
		sp = gp.sched.sp
	}
	gcLockStackBarriers(gp) // Not necessary during mark term, but harmless.
	switch gcphase {
	case _GCmark:
		// Install stack barriers during stack scan.
		barrierOffset = uintptr(firstStackBarrierOffset)
		nextBarrier = sp + barrierOffset

		if debug.gcstackbarrieroff > 0 {
			nextBarrier = ^uintptr(0)
		}

		// Remove any existing stack barriers before we
		// install new ones.
		gcRemoveStackBarriers(gp)

	case _GCmarktermination:
		if !work.markrootDone {
			// This is a STW GC. There may be stale stack
			// barriers from an earlier cycle since we
			// never passed through mark phase.
			gcRemoveStackBarriers(gp)
		}

		if int(gp.stkbarPos) == len(gp.stkbar) {
			// gp hit all of the stack barriers (or there
			// were none). Re-scan the whole stack.
			nextBarrier = ^uintptr(0)
		} else {
			// Only re-scan up to the lowest un-hit
			// barrier. Any frames above this have not
			// executed since the concurrent scan of gp and
			// any writes through up-pointers to above
			// this barrier had write barriers.
			nextBarrier = gp.stkbar[gp.stkbarPos].savedLRPtr
			if debugStackBarrier {
				print("rescan below ", hex(nextBarrier), " in [", hex(sp), ",", hex(gp.stack.hi), ") goid=", gp.goid, "\n")
			}
		}

	default:
		throw("scanstack in wrong phase")
	}

	// Scan the stack.
	var cache pcvalueCache
	n := 0
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &cache, gcw)

		if frame.fp > nextBarrier {
			// We skip installing a barrier on bottom-most
			// frame because on LR machines this LR is not
			// on the stack.
			if gcphase == _GCmark && n != 0 {
				if gcInstallStackBarrier(gp, frame) {
					barrierOffset *= 2
					nextBarrier = sp + barrierOffset
				}
			} else if gcphase == _GCmarktermination {
				// We just scanned a frame containing
				// a return to a stack barrier. Since
				// this frame never returned, we can
				// stop scanning.
				return false
			}
		}
		n++

		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	gcUnlockStackBarriers(gp)
	if gcphase == _GCmark {
		// gp may have added itself to the rescan list between
		// when GC started and now. It's clean now, so remove
		// it. This isn't safe during mark termination because
		// mark termination is consuming this list, but it's
		// also not necessary.
		dequeueRescan(gp)
	}
	gp.gcscanvalid = true
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {

	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
	}
}

// queueRescan adds gp to the stack rescan list and clears
// gp.gcscanvalid. The caller must own gp and ensure that gp isn't
// already on the rescan list.
func queueRescan(gp *g) {
	if gcphase == _GCoff {
		gp.gcscanvalid = false
		return
	}
	if gp.gcRescan != -1 {
		throw("g already on rescan list")
	}

	lock(&work.rescan.lock)
	gp.gcscanvalid = false

	// Recheck gcphase under the lock in case there was a phase change.
	if gcphase == _GCoff {
		unlock(&work.rescan.lock)
		return
	}
	if len(work.rescan.list) == cap(work.rescan.list) {
		throw("rescan list overflow")
	}
	n := len(work.rescan.list)
	gp.gcRescan = int32(n)
	work.rescan.list = work.rescan.list[:n+1]
	work.rescan.list[n].set(gp)
	unlock(&work.rescan.lock)
}

// dequeueRescan removes gp from the stack rescan list, if gp is on
// the rescan list. The caller must own gp.
func dequeueRescan(gp *g) {
	if gp.gcRescan == -1 {
		return
	}
	if gcphase == _GCoff {
		gp.gcRescan = -1
		return
	}

	lock(&work.rescan.lock)
	if work.rescan.list[gp.gcRescan].ptr() != gp {
		throw("bad dequeueRescan")
	}
	// Careful: gp may itself be the last G on the list.
	last := work.rescan.list[len(work.rescan.list)-1]
	work.rescan.list[gp.gcRescan] = last
	last.ptr().gcRescan = gp.gcRescan
	gp.gcRescan = -1
	work.rescan.list = work.rescan.list[:len(work.rescan.list)-1]
	unlock(&work.rescan.lock)
}
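
// Illustrative sketch, not part of the original source: dequeueRescan
// above removes an entry by swapping it with the last element, so
// deletion is O(1) at the cost of ordering. The same idea on a plain
// int slice, with hypothetical names:
//
//	func removeAt(list []int, i int) []int {
//		last := len(list) - 1
//		list[i] = list[last] // move the last element into the hole
//		return list[:last]   // shrink; order is not preserved
//	}
//
// The runtime version must additionally update the moved G's gcRescan
// index, which is why last.ptr().gcRescan is rewritten above.
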
type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainNoBlock
	gcDrainFlushBgCredit

	// gcDrainBlock means neither gcDrainUntilPreempt nor
	// gcDrainNoBlock. It is the default, but callers should use
	// the constant for documentation purposes.
	gcDrainBlock gcDrainFlags = 0
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until all roots and work buffers have been drained.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set. This implies gcDrainNoBlock.
//
// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
// unable to get more work. Otherwise, it will block until all
// blocking calls are blocked in gcDrain.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg()
	preemptible := flags&gcDrainUntilPreempt != 0
	blocking := flags&(gcDrainUntilPreempt|gcDrainNoBlock) == 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		for blocking || !gp.preempt {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
		}
	}

	initScanWork := gcw.scanWork

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		var b uintptr
		if blocking {
			b = gcw.get()
		} else {
			b = gcw.tryGetFast()
			if b == 0 {
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// work barrier reached or tryGet failed.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			gcw.scanWork = 0
		}
	}

	// In blocking mode, write barriers are not allowed after this
	// point because we must preserve the condition that the work
	// buffers are empty.

	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}
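
// Illustrative sketch, not part of the original source: combining the
// gcDrain flags according to the semantics documented above. A caller
// that wants to drain until preempted while paying credit to waiting
// assists could use
//
//	gcDrain(gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
//
// whereas gcDrain(gcw, gcDrainBlock) drains and blocks until the work
// barrier is reached. These calls are hypothetical examples of the flag
// combinations, not quotes of actual call sites.
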
// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//go:nowritebarrier
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3];
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
		}

		if b == 0 {
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				obj := *(*uintptr)(unsafe.Pointer(b + i))
				if obj != 0 && arena_start <= obj && obj < arena_used {
					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
						greyobject(obj, b, i, hbits, span, gcw, objIndex)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
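
// Illustrative sketch, not part of the original source: how scanblock
// above reads ptrmask. Each mask byte covers 8 consecutive words of the
// block, least-significant bit first. On a 64-bit system, for the word
// at byte offset i the relevant byte and bit are
//
//	maskByte := *addb(ptrmask, i/(8*8)) // 8 words per byte, 8 bytes per word
//	isPtr := maskByte>>uint((i/8)%8)&1 != 0
//
// so a mask byte of 0x05 (binary 00000101) means words 0 and 2 of that
// group hold pointers. The loop above walks the same bits one at a time
// and skips whole groups whose mask byte is zero.
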
// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object; scanobject consults
// the GC bitmap for the pointer mask and the spans for the size of the
// object.
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Note that arena_used may change concurrently during
	// scanobject and hence scanobject may encounter a pointer to
	// a newly allocated heap object that is *not* in
	// [start,used). It will not mark this object; however, we
	// know that it was just installed by a mutator, which means
	// that mutator will execute a write barrier and take care of
	// marking it. This is even more pronounced on relaxed memory
	// architectures since we access arena_used without barriers
	// or synchronization, but the same logic applies.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	// Find bits of the beginning of the object.
	// b must point to the beginning of a heap object, so
	// we can get its bits and span directly.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && !hbits.morePointers() {
			break // no more pointers in this object
		}
		if !hbits.isPointer() {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap and not back at the current object.
		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
			// Mark the object.
			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, hbits, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
		if gcphase == _GCmarktermination || gcBlackenPromptly {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}
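
// Illustrative sketch, not part of the original source: the accounting
// at the end of scanobject above. For a span with 96-byte elements
// whose heap bitmap reports "no more pointers" at the fourth word, the
// loop exits with i == 24, so
//
//	gcw.bytesMarked += 96 // the whole object counts as marked
//	gcw.scanWork += 24    // only the scanned prefix counts as work
//
// The element size is hypothetical; the point is that scan work is
// charged per byte actually examined, which is what assists and the
// pacer draw against.
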
// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			throw("checkmark found unmarked object")
		}
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
		atomic.Or8(mbits.bytep, mbits.mask)
		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if !hbits.hasPointers(span.elemsize) {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not in the Go heap\n")
		return
	}
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := h_spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
	skipped := false
	for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}

// Checkmarking

// To help debug the concurrent GC we remark with the world
// stopped, ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar not marked
// 10: pointer not marked
// 11: pointer marked
// 00: scalar marked
// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

//go:nowritebarrier
func initCheckmarks() {
	useCheckmark = true
	for _, s := range work.spans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range work.spans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}