github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/mgcsweep.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: sweeping
     6  
     7  // The sweeper consists of two different algorithms:
     8  //
     9  // * The object reclaimer finds and frees unmarked slots in spans. It
    10  //   can free a whole span if none of the objects are marked, but that
    11  //   isn't its goal. This can be driven either synchronously by
    12  //   mcentral.cacheSpan for mcentral spans, or asynchronously by
    13  //   sweepone, which looks at all the mcentral lists.
    14  //
    15  // * The span reclaimer looks for spans that contain no marked objects
    16  //   and frees whole spans. This is a separate algorithm because
    17  //   freeing whole spans is the hardest task for the object reclaimer,
    18  //   but is critical when allocating new spans. The entry point for
    19  //   this is mheap_.reclaim and it's driven by a sequential scan of
    20  //   the page marks bitmap in the heap arenas.
    21  //
    22  // Both algorithms ultimately call mspan.sweep, which sweeps a single
    23  // heap span.
    24  
    25  package runtime
    26  
    27  import (
    28  	"runtime/internal/atomic"
    29  	"unsafe"
    30  )
    31  
    32  var sweep sweepdata
    33  
    34  // State of background sweep.
    35  type sweepdata struct {
    36  	lock    mutex
    37  	g       *g
    38  	parked  bool
    39  	started bool
    40  
    41  	nbgsweep    uint32
    42  	npausesweep uint32
    43  
    44  	// centralIndex is the current unswept span class.
    45  	// It represents an index into the mcentral span
    46  	// sets. Accessed and updated via its load and
    47  	// update methods. Not protected by a lock.
    48  	//
    49  	// Reset at mark termination.
    50  	// Used by mheap.nextSpanForSweep.
    51  	centralIndex sweepClass
    52  }
    53  
    54  // sweepClass is a spanClass and one bit to represent whether we're currently
    55  // sweeping partial or full spans.
    56  type sweepClass uint32
    57  
    58  const (
    59  	numSweepClasses            = numSpanClasses * 2
    60  	sweepClassDone  sweepClass = sweepClass(^uint32(0))
    61  )
    62  
    63  func (s *sweepClass) load() sweepClass {
    64  	return sweepClass(atomic.Load((*uint32)(s)))
    65  }
    66  
    67  func (s *sweepClass) update(sNew sweepClass) {
    68  	// Only update *s if its current value is less than sNew,
    69  	// since *s increases monotonically.
    70  	sOld := s.load()
    71  	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
    72  		sOld = s.load()
    73  	}
    74  	// TODO(mknyszek): This isn't the only place we have
    75  	// an atomic monotonically increasing counter. It would
    76  	// be nice to have an "atomic max" which is just implemented
    77  	// as the above on most architectures. Some architectures
    78  	// like RISC-V however have native support for an atomic max.
    79  }
    80  
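// clear resets s to zero, so the next sweep pass starts again from the
// first sweep class.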
    81  func (s *sweepClass) clear() {
    82  	atomic.Store((*uint32)(s), 0)
    83  }
    84  
    85  // split returns the underlying span class as well as
    86  // whether we're interested in the full or partial
    87  // unswept lists for that class, indicated as a boolean
    88  // (true means "full").
    89  func (s sweepClass) split() (spc spanClass, full bool) {
    90  	return spanClass(s >> 1), s&1 == 0
    91  }
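
// sweepClassFor is an illustrative sketch of the packing that split reverses
// (a hypothetical helper, not something the runtime defines): a sweepClass
// stores the spanClass in its upper bits and uses bit 0 to select the full
// (0) or partial (1) unswept set, so successive sweepClass values visit the
// full set and then the partial set for each span class.
func sweepClassFor(spc spanClass, full bool) sweepClass {
	sc := sweepClass(spc) << 1 // upper bits hold the span class
	if !full {
		sc |= 1 // bit 0 set selects the partial-unswept set
	}
	return sc
}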
    92  
    93  // nextSpanForSweep finds and pops the next span for sweeping from the
    94  // central sweep buffers. It returns ownership of the span to the caller.
    95  // Returns nil if no such span exists.
    96  func (h *mheap) nextSpanForSweep() *mspan {
    97  	sg := h.sweepgen
    98  	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
    99  		spc, full := sc.split()
   100  		c := &h.central[spc].mcentral
   101  		var s *mspan
   102  		if full {
   103  			s = c.fullUnswept(sg).pop()
   104  		} else {
   105  			s = c.partialUnswept(sg).pop()
   106  		}
   107  		if s != nil {
   108  			// Write down that we found something so future sweepers
   109  			// can start from here.
   110  			sweep.centralIndex.update(sc)
   111  			return s
   112  		}
   113  	}
   114  	// Write down that we found nothing.
   115  	sweep.centralIndex.update(sweepClassDone)
   116  	return nil
   117  }
   118  
   119  // finishsweep_m ensures that all spans are swept.
   120  //
   121  // The world must be stopped. This ensures there are no sweeps in
   122  // progress.
   123  //
   124  //go:nowritebarrier
   125  func finishsweep_m() {
   126  	assertWorldStopped()
   127  
   128  	// Sweeping must be complete before marking commences, so
   129  	// sweep any unswept spans. If this is a concurrent GC, there
   130  	// shouldn't be any spans left to sweep, so this should finish
   131  	// instantly. If GC was forced before the concurrent sweep
   132  	// finished, there may be spans to sweep.
   133  	for sweepone() != ^uintptr(0) {
   134  		sweep.npausesweep++
   135  	}
   136  
   137  	// Reset all the unswept buffers, which should be empty.
   138  	// Do this in sweep termination as opposed to mark termination
   139  	// so that we can catch unswept spans and reclaim blocks as
   140  	// soon as possible.
   141  	sg := mheap_.sweepgen
   142  	for i := range mheap_.central {
   143  		c := &mheap_.central[i].mcentral
   144  		c.partialUnswept(sg).reset()
   145  		c.fullUnswept(sg).reset()
   146  	}
   147  
   148  	// Sweeping is done, so if the scavenger isn't already awake,
   149  	// wake it up. There's definitely work for it to do at this
   150  	// point.
   151  	wakeScavenger()
   152  
   153  	nextMarkBitArenaEpoch()
   154  }
   155  
   156  func bgsweep(c chan int) {
   157  	sweep.g = getg()
   158  
   159  	lockInit(&sweep.lock, lockRankSweep)
   160  	lock(&sweep.lock)
   161  	sweep.parked = true
   162  	c <- 1
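	// Park until the next sweep phase; gcSweep readies this goroutine once
	// sweeping needs to begin (see gcSweep in mgc.go).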
   163  	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
   164  
   165  	for {
   166  		for sweepone() != ^uintptr(0) {
   167  			sweep.nbgsweep++
   168  			Gosched()
   169  		}
   170  		for freeSomeWbufs(true) {
   171  			Gosched()
   172  		}
   173  		lock(&sweep.lock)
   174  		if !isSweepDone() {
   175  			// This can happen if a GC runs between
   176  			// sweepone returning ^0 above
   177  			// and the lock being acquired.
   178  			unlock(&sweep.lock)
   179  			continue
   180  		}
   181  		sweep.parked = true
   182  		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
   183  	}
   184  }
   185  
   186  // sweepone sweeps some unswept heap span and returns the number of pages returned
   187  // to the heap, or ^uintptr(0) if there was nothing to sweep.
   188  func sweepone() uintptr {
   189  	_g_ := getg()
   190  	sweepRatio := mheap_.sweepPagesPerByte // For debugging
   191  
   192  	// Increment locks to ensure that the goroutine is not preempted
   193  	// in the middle of the sweep, which would leave the span in an inconsistent state for the next GC.
   194  	_g_.m.locks++
   195  	if atomic.Load(&mheap_.sweepdone) != 0 {
   196  		_g_.m.locks--
   197  		return ^uintptr(0)
   198  	}
   199  	atomic.Xadd(&mheap_.sweepers, +1)
   200  
   201  	// Find a span to sweep.
   202  	var s *mspan
   203  	sg := mheap_.sweepgen
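	// For reference, sweepgen encodes a span's sweep state relative to
	// sg (mheap_.sweepgen), per the mspan documentation in mheap.go:
	//	sweepgen == sg-2: the span needs sweeping
	//	sweepgen == sg-1: the span is currently being swept
	//	sweepgen == sg:   the span has been swept and is ready to use
	//	sweepgen == sg+1: the span was cached before sweep began, is still cached, and needs sweeping
	//	sweepgen == sg+3: the span was swept and then cached and is still cached
	// sg is incremented by 2 after every GC.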
   204  	for {
   205  		s = mheap_.nextSpanForSweep()
   206  		if s == nil {
   207  			atomic.Store(&mheap_.sweepdone, 1)
   208  			break
   209  		}
   210  		if state := s.state.get(); state != mSpanInUse {
   211  			// This can happen if direct sweeping already
   212  			// swept this span, but in that case the sweep
   213  			// generation should always be up-to-date.
   214  			if !(s.sweepgen == sg || s.sweepgen == sg+3) {
   215  				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
   216  				throw("non in-use span in unswept list")
   217  			}
   218  			continue
   219  		}
   220  		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   221  			break
   222  		}
   223  	}
   224  
   225  	// Sweep the span we found.
   226  	npages := ^uintptr(0)
   227  	if s != nil {
   228  		npages = s.npages
   229  		if s.sweep(false) {
   230  			// Whole span was freed. Count it toward the
   231  			// page reclaimer credit since these pages can
   232  			// now be used for span allocation.
   233  			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
   234  		} else {
   235  			// Span is still in-use, so this returned no
   236  			// pages to the heap and the span needs to
   237  			// move to the swept in-use list.
   238  			npages = 0
   239  		}
   240  	}
   241  
   242  	// Decrement the number of active sweepers and if this is the
   243  	// last one print trace information.
   244  	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
   245  		// Since the sweeper is done, move the scavenge gen forward (signalling
   246  		// that there's new work to do) and wake the scavenger.
   247  		//
   248  		// The scavenger is signaled by the last sweeper because once
   249  		// sweeping is done, we will definitely have useful work for
   250  		// the scavenger to do, since the scavenger only runs over the
   251  		// heap once per GC cycle. This update is not done during sweep
   252  		// termination because in some cases there may be a long delay
   253  		// between sweep done and sweep termination (e.g. not enough
   254  		// allocations to trigger a GC) which would be nice to fill in
   255  		// with scavenging work.
   256  		systemstack(func() {
   257  			lock(&mheap_.lock)
   258  			mheap_.pages.scavengeStartGen()
   259  			unlock(&mheap_.lock)
   260  		})
   261  		// Since we might sweep in an allocation path, it's not possible
   262  		// for us to wake the scavenger directly via wakeScavenger, since
   263  		// it could allocate. Ask sysmon to do it for us instead.
   264  		readyForScavenger()
   265  
   266  		if debug.gcpacertrace > 0 {
   267  			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
   268  		}
   269  	}
   270  	_g_.m.locks--
   271  	return npages
   272  }
   273  
   274  // isSweepDone reports whether all spans are swept or currently being swept.
   275  //
   276  // Note that this condition may transition from false to true at any
   277  // time as the sweeper runs. It may transition from true to false if a
   278  // GC runs; to prevent that the caller must be non-preemptible or must
   279  // somehow block GC progress.
   280  func isSweepDone() bool {
   281  	return mheap_.sweepdone != 0
   282  }
   283  
   284  // Returns only when span s has been swept.
   285  //go:nowritebarrier
   286  func (s *mspan) ensureSwept() {
   287  	// Caller must disable preemption.
   288  	// Otherwise when this function returns the span can become unswept again
   289  	// (if GC is triggered on another goroutine).
   290  	_g_ := getg()
   291  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
   292  		throw("mspan.ensureSwept: m is not locked")
   293  	}
   294  
   295  	sg := mheap_.sweepgen
   296  	spangen := atomic.Load(&s.sweepgen)
   297  	if spangen == sg || spangen == sg+3 {
   298  		return
   299  	}
   300  	// The caller must be sure that the span is a mSpanInUse span.
   301  	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   302  		s.sweep(false)
   303  		return
   304  	}
   305  	// Unfortunate condition: someone else has claimed the span for sweeping and we don't have an efficient means to wait, so spin until it is swept.
   306  	for {
   307  		spangen := atomic.Load(&s.sweepgen)
   308  		if spangen == sg || spangen == sg+3 {
   309  			break
   310  		}
   311  		osyield()
   312  	}
   313  }
   314  
   315  // Sweep frees or collects finalizers for blocks not marked in the mark phase.
   316  // It clears the mark bits in preparation for the next GC round.
   317  // Returns true if the span was returned to heap.
   318  // If preserve=true, don't return it to the heap nor relink it in mcentral lists;
   319  // caller takes care of it.
   320  func (s *mspan) sweep(preserve bool) bool {
   321  	// It's critical that we enter this function with preemption disabled,
   322  	// GC must not start while we are in the middle of this function.
   323  	_g_ := getg()
   324  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
   325  		throw("mspan.sweep: m is not locked")
   326  	}
   327  	sweepgen := mheap_.sweepgen
   328  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
   329  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   330  		throw("mspan.sweep: bad span state")
   331  	}
   332  
   333  	if trace.enabled {
   334  		traceGCSweepSpan(s.npages * _PageSize)
   335  	}
   336  
   337  	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
   338  
   339  	spc := s.spanclass
   340  	size := s.elemsize
   341  
   342  	// The allocBits indicate which unmarked objects don't need to be
   343  	// processed since they were free at the end of the last GC cycle
   344  	// and were not allocated since then.
   345  	// If the allocBits index is >= s.freeindex and the bit
   346  	// is not marked then the object remains unallocated
   347  	// since the last GC.
   348  	// This situation is analogous to being on a freelist.
   349  
   350  	// Unlink & free special records for any objects we're about to free.
   351  	// Two complications here:
   352  	// 1. An object can have both finalizer and profile special records.
   353  	//    In such a case we need to queue the finalizer for execution,
   354  	//    mark the object as live, and preserve the profile special.
   355  	// 2. A tiny object can have several finalizers set up for different offsets.
   356  	//    If such an object is not marked, we need to queue all finalizers at once.
   357  	// Both 1 and 2 are possible at the same time.
   358  	hadSpecials := s.specials != nil
   359  	specialp := &s.specials
   360  	special := *specialp
   361  	for special != nil {
   362  		// A finalizer can be set for an inner byte of an object; find the object's beginning.
   363  		objIndex := uintptr(special.offset) / size
   364  		p := s.base() + objIndex*size
   365  		mbits := s.markBitsForIndex(objIndex)
   366  		if !mbits.isMarked() {
   367  			// This object is not marked and has at least one special record.
   368  			// Pass 1: see if it has at least one finalizer.
   369  			hasFin := false
   370  			endOffset := p - s.base() + size
   371  			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
   372  				if tmp.kind == _KindSpecialFinalizer {
   373  					// Stop freeing of object if it has a finalizer.
   374  					mbits.setMarkedNonAtomic()
   375  					hasFin = true
   376  					break
   377  				}
   378  			}
   379  			// Pass 2: queue all finalizers _or_ handle the profile record.
   380  			for special != nil && uintptr(special.offset) < endOffset {
   381  				// Find the exact byte for which the special was set up
   382  				// (as opposed to the object's beginning).
   383  				p := s.base() + uintptr(special.offset)
   384  				if special.kind == _KindSpecialFinalizer || !hasFin {
   385  					// Splice out special record.
   386  					y := special
   387  					special = special.next
   388  					*specialp = special
   389  					freespecial(y, unsafe.Pointer(p), size)
   390  				} else {
   391  					// This is a profile record, but the object has finalizers (so it is kept alive).
   392  					// Keep the special record.
   393  					specialp = &special.next
   394  					special = *specialp
   395  				}
   396  			}
   397  		} else {
   398  			// Object is still live: keep the special record.
   399  			specialp = &special.next
   400  			special = *specialp
   401  		}
   402  	}
   403  	if hadSpecials && s.specials == nil {
   404  		spanHasNoSpecials(s)
   405  	}
   406  
   407  	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
   408  		// Find all newly freed objects. This doesn't have to be
   409  		// efficient; allocfreetrace has massive overhead.
   410  		mbits := s.markBitsForBase()
   411  		abits := s.allocBitsForIndex(0)
   412  		for i := uintptr(0); i < s.nelems; i++ {
   413  			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
   414  				x := s.base() + i*s.elemsize
   415  				if debug.allocfreetrace != 0 {
   416  					tracefree(unsafe.Pointer(x), size)
   417  				}
   418  				if debug.clobberfree != 0 {
   419  					clobberfree(unsafe.Pointer(x), size)
   420  				}
   421  				if raceenabled {
   422  					racefree(unsafe.Pointer(x), size)
   423  				}
   424  				if msanenabled {
   425  					msanfree(unsafe.Pointer(x), size)
   426  				}
   427  			}
   428  			mbits.advance()
   429  			abits.advance()
   430  		}
   431  	}
   432  
   433  	// Check for zombie objects.
   434  	if s.freeindex < s.nelems {
   435  		// Everything < freeindex is allocated and hence
   436  		// cannot be zombies.
   437  		//
   438  		// Check the first bitmap byte, where we have to be
   439  		// careful with freeindex.
   440  		obj := s.freeindex
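		// A set bit in gcmarkBits &^ allocBits is an object that is marked
		// but not allocated: a zombie. Shifting by obj%8 discards the bits
		// for indices below freeindex in this first byte.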
   441  		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
   442  			s.reportZombies()
   443  		}
   444  		// Check remaining bytes.
   445  		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
   446  			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
   447  				s.reportZombies()
   448  			}
   449  		}
   450  	}
   451  
   452  	// Count the number of allocated objects in this span and use it to compute how many were freed.
   453  	nalloc := uint16(s.countAlloc())
   454  	nfreed := s.allocCount - nalloc
   455  	if nalloc > s.allocCount {
   456  		// The zombie check above should have caught this in
   457  		// more detail.
   458  		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
   459  		throw("sweep increased allocation count")
   460  	}
   461  
   462  	s.allocCount = nalloc
   463  	s.freeindex = 0 // reset allocation index to start of span.
   464  	if trace.enabled {
   465  		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
   466  	}
   467  
   468  	// gcmarkBits becomes the allocBits.
   469  	// Get a fresh, cleared gcmarkBits in preparation for the next GC.
   470  	s.allocBits = s.gcmarkBits
   471  	s.gcmarkBits = newMarkBits(s.nelems)
   472  
   473  	// Initialize alloc bits cache.
   474  	s.refillAllocCache(0)
   475  
   476  	// The span must be in our exclusive ownership until we update sweepgen;
   477  	// check for potential races.
   478  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
   479  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   480  		throw("mspan.sweep: bad span state after sweep")
   481  	}
   482  	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
   483  		throw("swept cached span")
   484  	}
   485  
   486  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
   487  	// because of the potential for a concurrent free/SetFinalizer.
   488  	//
   489  	// But we need to set it before we make the span available for allocation
   490  	// (return it to heap or mcentral), because allocation code assumes that a
   491  	// span is already swept if available for allocation.
   492  	//
   493  	// Serialization point.
   494  	// At this point the mark bits are cleared and allocation is ready
   495  	// to go, so release the span.
   496  	atomic.Store(&s.sweepgen, sweepgen)
   497  
   498  	if spc.sizeclass() != 0 {
   499  		// Handle spans for small objects.
   500  		if nfreed > 0 {
   501  			// Only mark the span as needing zeroing if we've freed any
   502  			// objects, because a fresh span that had been allocated into
   503  			// but not totally filled, and was then swept, still has all
   504  			// of its free slots zeroed.
   505  			s.needzero = 1
   506  			stats := memstats.heapStats.acquire()
   507  			atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
   508  			memstats.heapStats.release()
   509  		}
   510  		if !preserve {
   511  			// The caller may not have removed this span from whatever
   512  			// unswept set it's on, but it has taken ownership of the span
   513  			// for sweeping by updating sweepgen. If this span is still in
   514  			// an unswept set, then the mcentral will pop it off the
   515  			// set, check its sweepgen, and ignore it.
   516  			if nalloc == 0 {
   517  				// Free totally free span directly back to the heap.
   518  				mheap_.freeSpan(s)
   519  				return true
   520  			}
   521  			// Return span back to the right mcentral list.
   522  			if uintptr(nalloc) == s.nelems {
   523  				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
   524  			} else {
   525  				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
   526  			}
   527  		}
   528  	} else if !preserve {
   529  		// Handle spans for large objects.
   530  		if nfreed != 0 {
   531  			// Free large object span to heap.
   532  
   533  			// NOTE(rsc,dvyukov): The original implementation of efence
   534  			// in CL 22060046 used sysFree instead of sysFault, so that
   535  			// the operating system would eventually give the memory
   536  			// back to us again, so that an efence program could run
   537  			// longer without running out of memory. Unfortunately,
   538  			// calling sysFree here without any kind of adjustment of the
   539  			// heap data structures means that when the memory does
   540  			// come back to us, we have the wrong metadata for it, either in
   541  			// the mspan structures or in the garbage collection bitmap.
   542  			// Using sysFault here means that the program will run out of
   543  			// memory fairly quickly in efence mode, but at least it won't
   544  			// have mysterious crashes due to confused memory reuse.
   545  			// It should be possible to switch back to sysFree if we also
   546  			// implement and then call some kind of mheap.deleteSpan.
   547  			if debug.efence > 0 {
   548  				s.limit = 0 // prevent mlookup from finding this span
   549  				sysFault(unsafe.Pointer(s.base()), size)
   550  			} else {
   551  				mheap_.freeSpan(s)
   552  			}
   553  			stats := memstats.heapStats.acquire()
   554  			atomic.Xadduintptr(&stats.largeFreeCount, 1)
   555  			atomic.Xadduintptr(&stats.largeFree, size)
   556  			memstats.heapStats.release()
   557  			return true
   558  		}
   559  
   560  		// Add a large span directly onto the full+swept list.
   561  		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
   562  	}
   563  	return false
   564  }
   565  
   566  // reportZombies reports any marked but free objects in s and throws.
   567  //
   568  // This generally means one of the following:
   569  //
   570  // 1. User code converted a pointer to a uintptr and then back
   571  // unsafely, and a GC ran while the uintptr was the only reference to
   572  // an object.
   573  //
   574  // 2. User code (or a compiler bug) constructed a bad pointer that
   575  // points to a free slot, often a past-the-end pointer.
   576  //
   577  // 3. The GC two cycles ago missed a pointer and freed a live object,
   578  // but it was still live in the last cycle, so this GC cycle found a
   579  // pointer to that object and marked it.
   580  func (s *mspan) reportZombies() {
   581  	printlock()
   582  	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
   583  	mbits := s.markBitsForBase()
   584  	abits := s.allocBitsForIndex(0)
   585  	for i := uintptr(0); i < s.nelems; i++ {
   586  		addr := s.base() + i*s.elemsize
   587  		print(hex(addr))
   588  		alloc := i < s.freeindex || abits.isMarked()
   589  		if alloc {
   590  			print(" alloc")
   591  		} else {
   592  			print(" free ")
   593  		}
   594  		if mbits.isMarked() {
   595  			print(" marked  ")
   596  		} else {
   597  			print(" unmarked")
   598  		}
   599  		zombie := mbits.isMarked() && !alloc
   600  		if zombie {
   601  			print(" zombie")
   602  		}
   603  		print("\n")
   604  		if zombie {
   605  			length := s.elemsize
   606  			if length > 1024 {
   607  				length = 1024
   608  			}
   609  			hexdumpWords(addr, addr+length, nil)
   610  		}
   611  		mbits.advance()
   612  		abits.advance()
   613  	}
   614  	throw("found pointer to free object")
   615  }
   616  
   617  // deductSweepCredit deducts sweep credit for allocating a span of
   618  // size spanBytes. This must be performed *before* the span is
   619  // allocated to ensure the system has enough credit. If necessary, it
   620  // performs sweeping to prevent going into debt. If the caller will
   621  // also sweep pages (e.g., for a large allocation), it can pass a
   622  // non-zero callerSweepPages to leave that many pages unswept.
   623  //
   624  // deductSweepCredit makes a worst-case assumption that all spanBytes
   625  // bytes of the ultimately allocated span will be available for object
   626  // allocation.
   627  //
   628  // deductSweepCredit is the core of the "proportional sweep" system.
   629  // It uses statistics gathered by the garbage collector to perform
   630  // enough sweeping so that all pages are swept during the concurrent
   631  // sweep phase between GC cycles.
   632  //
   633  // mheap_ must NOT be locked.
   634  func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
   635  	if mheap_.sweepPagesPerByte == 0 {
   636  		// Proportional sweep is done or disabled.
   637  		return
   638  	}
   639  
   640  	if trace.enabled {
   641  		traceGCSweepStart()
   642  	}
   643  
   644  retry:
   645  	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)
   646  
   647  	// Fix debt if necessary.
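	// The debt computation below: once the live heap has grown to
	// newHeapLive bytes past sweepHeapLiveBasis, proportional sweep wants
	// at least sweepPagesPerByte*newHeapLive pages to have been swept
	// beyond pagesSweptBasis; callerSweepPages is subtracted because the
	// caller will sweep that many pages itself.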
   648  	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
   649  	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
   650  	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
   651  		if sweepone() == ^uintptr(0) {
   652  			mheap_.sweepPagesPerByte = 0
   653  			break
   654  		}
   655  		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
   656  			// Sweep pacing changed. Recompute debt.
   657  			goto retry
   658  		}
   659  	}
   660  
   661  	if trace.enabled {
   662  		traceGCSweepDone()
   663  	}
   664  }
   665  
   666  // clobberfree sets the memory content at x to bad content, for debugging
   667  // purposes.
   668  func clobberfree(x unsafe.Pointer, size uintptr) {
   669  	// size (span.elemsize) is always a multiple of 4.
   670  	for i := uintptr(0); i < size; i += 4 {
   671  		*(*uint32)(add(x, i)) = 0xdeadbeef
   672  	}
   673  }