github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/runtime/mgcsweep.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim and it's driven by a sequential scan of
//   the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
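//
// Schematically (summarizing the description above, both paths funnel
// into one function):
//
//	object reclaimer: mcentral.cacheSpan / sweepone -> mspan.sweep
//	span reclaimer:   mheap_.reclaim                -> mspan.sweep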

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32

	// active tracks outstanding sweepers and the sweep
	// termination condition.
	active activeSweep

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
	// TODO(mknyszek): This isn't the only place we have
	// an atomic monotonically increasing counter. It would
	// be nice to have an "atomic max" which is just implemented
	// as the above on most architectures. Some architectures
	// like RISC-V however have native support for an atomic max.
}
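
// For illustration only, a minimal sketch of the "atomic max" mentioned
// in the TODO above, built from the same CAS loop (a hypothetical helper,
// not part of the runtime):
//
//	func atomicMax(addr *uint32, v uint32) {
//		for old := atomic.Load(addr); old < v; old = atomic.Load(addr) {
//			if atomic.Cas(addr, old, v) {
//				return
//			}
//		}
//	}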

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}
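
// For example (values derived from split above): sweepClass 0 is the
// full-unswept list of spanClass 0, sweepClass 1 the partial-unswept list
// of spanClass 0, sweepClass 2 the full-unswept list of spanClass 1, and
// so on:
//
//	spc, full := sweepClass(3).split() // spc == spanClass(1), full == false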

// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}

const sweepDrainedMask = 1 << 31

// activeSweep is a type that captures whether sweeping
// is done, and whether there are any outstanding sweepers.
//
// Every potential sweeper must call begin() before they look
// for work, and end() after they've finished sweeping.
type activeSweep struct {
	// state is divided into two parts.
	//
	// The top bit (masked by sweepDrainedMask) is a boolean
	// value indicating whether all the sweep work has been
	// drained from the queue.
	//
	// The rest of the bits are a counter, indicating the
	// number of outstanding concurrent sweepers.
	state atomic.Uint32
}
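
// To illustrate the encoding (sweepDrainedMask is 1<<31):
//
//	state == 0x00000003 // not drained, 3 outstanding sweepers
//	state == 0x80000001 // drained, 1 sweeper still running
//	state == 0x80000000 // drained, no sweepers: sweeping is done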

// begin registers a new sweeper. Returns a sweepLocker
// for acquiring spans for sweeping. Any outstanding sweeper blocks
// sweep termination.
//
// If the sweepLocker is invalid, the caller can be sure that all
// outstanding sweep work has been drained, so there is nothing left
// to sweep. Note that there may be sweepers currently running, so
// this does not indicate that all sweeping has completed.
//
// Even if the sweepLocker is invalid, its sweepGen is always valid.
func (a *activeSweep) begin() sweepLocker {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return sweepLocker{mheap_.sweepgen, false}
		}
		if a.state.CompareAndSwap(state, state+1) {
			return sweepLocker{mheap_.sweepgen, true}
		}
	}
}
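
// The typical begin/end pairing looks like the following sketch
// (sweepone below follows this shape):
//
//	sl := sweep.active.begin()
//	if !sl.valid {
//		return // all sweep work has been drained
//	}
//	// ... acquire spans via sl.tryAcquire and sweep them ...
//	sweep.active.end(sl)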

// end deregisters a sweeper. Must be called once for each time
// begin is called if the sweepLocker is valid.
func (a *activeSweep) end(sl sweepLocker) {
	if sl.sweepGen != mheap_.sweepgen {
		throw("sweeper left outstanding across sweep generations")
	}
	for {
		state := a.state.Load()
		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
			throw("mismatched begin/end of activeSweep")
		}
		if a.state.CompareAndSwap(state, state-1) {
			if state != sweepDrainedMask {
				return
			}
			if debug.gcpacertrace > 0 {
				print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
			}
			return
		}
	}
}

// markDrained marks the active sweep cycle as having drained
// all remaining work. This is safe to call concurrently
// with all other methods of activeSweep, though it may race.
//
// Returns true if this call was the one that actually performed
// the mark.
func (a *activeSweep) markDrained() bool {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return false
		}
		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
			return true
		}
	}
}

// sweepers returns the current number of active sweepers.
func (a *activeSweep) sweepers() uint32 {
	return a.state.Load() &^ sweepDrainedMask
}

// isDone returns true if all sweep work has been drained and no more
// outstanding sweepers exist. That is, when the sweep phase is
// completely done.
func (a *activeSweep) isDone() bool {
	return a.state.Load() == sweepDrainedMask
}

// reset sets up the activeSweep for the next sweep cycle.
//
// The world must be stopped.
func (a *activeSweep) reset() {
	assertWorldStopped()
	a.state.Store(0)
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// Make sure there aren't any outstanding sweepers left.
	// At this point, with the world stopped, it means one of two
	// things: either we were able to preempt a sweeper, or a
	// sweeper didn't call sweep.active.end when it should have.
	// Both cases indicate a bug, so throw.
	if sweep.active.sweepers() != 0 {
		throw("active sweepers found at start of mark phase")
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so if the scavenger isn't already awake,
	// wake it up. There's definitely work for it to do at this
	// point.
	wakeScavenger()

	nextMarkBitArenaEpoch()
}

func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^uintptr(0) above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}

// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
	valid    bool
}

// sweepLocked represents sweep ownership of a span.
type sweepLocked struct {
	*mspan
}

// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	if !l.valid {
		throw("use of invalid sweepLocker")
	}
	// Check before attempting to CAS.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
	// Attempt to acquire sweep ownership of s.
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	}
	return sweepLocked{s}, true
}
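
// For reference, mspan.sweepgen encodes a span's sweep state relative to
// mheap_.sweepgen (call it h), as documented on mspan in mheap.go:
//
//	sweepgen == h - 2: the span needs sweeping
//	sweepgen == h - 1: the span is currently being swept
//	sweepgen == h:     the span is swept and ready to use
//	sweepgen == h + 1: the span was cached before sweep began and still needs sweeping
//	sweepgen == h + 3: the span was swept and then cached and is still cached
//
// h increases by 2 on every GC cycle, which is why tryAcquire's CAS from
// h-2 to h-1 uniquely claims an unswept span.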

// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	gp := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of a sweep, which would leave the span in an
	// inconsistent state for the next GC.
	gp.m.locks++

	// TODO(austin): sweepone is almost always called in a loop;
	// lift the sweepLocker into its callers.
	sl := sweep.active.begin()
	if !sl.valid {
		gp.m.locks--
		return ^uintptr(0)
	}

	// Find a span to sweep.
	npages := ^uintptr(0)
	var noMoreWork bool
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
			noMoreWork = sweep.active.markDrained()
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s, ok := sl.tryAcquire(s); ok {
			// Sweep the span we found.
			npages = s.npages
			if s.sweep(false) {
				// Whole span was freed. Count it toward the
				// page reclaimer credit since these pages can
				// now be used for span allocation.
				mheap_.reclaimCredit.Add(npages)
			} else {
				// Span is still in-use, so this returned no
				// pages to the heap and the span needs to
				// move to the swept in-use list.
				npages = 0
			}
			break
		}
	}
	sweep.active.end(sl)

	if noMoreWork {
		// The sweep list is empty. There may still be
		// concurrent sweeps running, but we're at least very
		// close to done sweeping.

		// Move the scavenge gen forward (signalling
		// that there's new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		systemstack(func() {
			lock(&mheap_.lock)
			mheap_.pages.scavengeStartGen()
			unlock(&mheap_.lock)
		})
		// Since we might sweep in an allocation path, it's not possible
		// for us to wake the scavenger directly via wakeScavenger, since
		// it could allocate. Ask sysmon to do it for us instead.
		readyForScavenger()
	}

	gp.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return sweep.active.isDone()
}

// ensureSwept returns only when span s has been swept.
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise, when this function returns, the span can become
	// unswept again (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	// If this operation fails, it means that there are no more
	// spans to be swept. In this case, either s has already been
	// swept, or is about to be acquired for sweeping and swept.
	sl := sweep.active.begin()
	if sl.valid {
		// The caller must be sure that the span is a mSpanInUse span.
		if s, ok := sl.tryAcquire(s); ok {
			s.sweep(false)
			sweep.active.end(sl)
			return
		}
		sweep.active.end(sl)
	}

	// Unfortunately we can't sweep the span ourselves. Somebody else
	// got to it first. We don't have efficient means to wait, but that's
	// OK, it will be swept fairly soon.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
			break
		}
		osyield()
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}

	s := sl.mspan
	if !preserve {
		// We'll release ownership of this span. Nil it out to
		// prevent the caller from accidentally using it.
		sl.mspan = nil
	}

	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	mheap_.pagesSwept.Add(int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked, then the object has remained unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.
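	// Concretely, the "is this slot allocated?" predicate used during
	// sweep (see the debug loop and reportZombies below) is:
	//
	//	allocated := index < s.freeindex || s.allocBitsForIndex(index).isMarked()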

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such a case, we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	siter := newSpecialsIter(s)
	for siter.valid() {
		// A finalizer can be set for an inner byte of an object;
		// find the object's beginning.
		objIndex := uintptr(siter.s.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for siter.valid() && uintptr(siter.s.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object's beginning).
				special := siter.s
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					siter.unlinkAndNext()
					freeSpecial(special, unsafe.Pointer(p), size)
				} else {
					// The object has finalizers, so we're keeping it alive.
					// All other specials only apply when an object is freed,
					// so just keep the special record.
					siter.next()
				}
			}
		} else {
			// object is still live
			if siter.s.kind == _KindSpecialReachable {
				special := siter.unlinkAndNext()
				(*specialReachable)(unsafe.Pointer(special)).reachable = true
				freeSpecial(special, unsafe.Pointer(p), size)
			} else {
				// keep special record
				siter.next()
			}
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
				if asanenabled {
					asanpoison(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}
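
	// To illustrate the zombie test above: a set bit in
	// gcmarkBits &^ allocBits is an object that this GC marked but that
	// the sweeper believes is free. For one bitmap byte
	// (bit 0 = lowest-indexed object):
	//
	//	gcmarkBits byte: 0b00010110
	//	allocBits byte:  0b00000110
	//	gcmark &^ alloc: 0b00010000 // the object at bit 4 is a zombie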

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh, cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// so check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation is ready
	// to go, so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that was allocated into but
			// not totally filled, and then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
			memstats.heapStats.release()
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set it's on, but it has taken ownership of the span
			// for sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.

			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used sysFree instead of sysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling sysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have the wrong metadata for it, either in
			// the mspan structures or in the garbage collection bitmap.
			// Using sysFault here means that the program will run out of
			// memory fairly quickly in efence mode, but at least it won't
			// have mysterious crashes due to confused memory reuse.
			// It should be possible to switch back to sysFree if we also
			// implement and then call some kind of mheap.deleteSpan.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.largeFreeCount, 1)
			atomic.Xadduintptr(&stats.largeFree, size)
			memstats.heapStats.release()
			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}

// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked  ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := mheap_.pagesSweptBasis.Load()

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if mheap_.pagesSweptBasis.Load() != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
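
// A worked example of the debt computation (illustrative numbers): with
// sweepPagesPerByte = 0.001, 10 MB allocated since the sweep basis
// (newHeapLive = 10<<20, including spanBytes), and callerSweepPages = 0,
// the target is
//
//	pagesTarget = 0.001 * (10<<20) - 0 = 10485 pages
//
// so the caller keeps calling sweepone until at least 10485 pages have
// been swept since pagesSweptBasis was last reset.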

// clobberfree sets the memory content at x to bad content, for debugging
// purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}

// gcPaceSweeper updates the sweeper's pacing parameters.
//
// Must be called whenever the GC's pacing is updated.
//
// The world must be stopped, or mheap_.lock must be held.
func gcPaceSweeper(trigger uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// Update sweep pacing.
	if isSweepDone() {
		mheap_.sweepPagesPerByte = 0
	} else {
		// Concurrent sweep needs to sweep all of the in-use
		// pages by the time the allocated heap reaches the GC
		// trigger. Compute the ratio of in-use pages to sweep
		// per byte allocated, accounting for the fact that
		// some might already be swept.
		heapLiveBasis := atomic.Load64(&gcController.heapLive)
		heapDistance := int64(trigger) - int64(heapLiveBasis)
		// Add a little margin so rounding errors and
		// concurrent sweep are less likely to leave pages
		// unswept when GC starts.
		heapDistance -= 1024 * 1024
		if heapDistance < _PageSize {
			// Avoid setting the sweep ratio extremely high
			heapDistance = _PageSize
		}
		pagesSwept := mheap_.pagesSwept.Load()
		pagesInUse := mheap_.pagesInUse.Load()
		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
		if sweepDistancePages <= 0 {
			mheap_.sweepPagesPerByte = 0
		} else {
			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
			mheap_.sweepHeapLiveBasis = heapLiveBasis
			// Write pagesSweptBasis last, since this
			// signals concurrent sweeps to recompute
			// their debt.
			mheap_.pagesSweptBasis.Store(pagesSwept)
		}
	}
}
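
// A worked example of the pacing ratio (illustrative numbers): if the GC
// trigger is 100 MB, heapLive is 60 MB at the time of the update, and
// 5000 in-use pages remain unswept, then
//
//	heapDistance      = 100<<20 - 60<<20 - 1<<20 = 39 MB
//	sweepPagesPerByte = 5000 / (39<<20)         ~= 0.000122 pages/byte
//
// i.e. every newly allocated byte obliges the allocator to sweep about
// 0.000122 pages, finishing all 5000 pages just before the heap grows
// the remaining 39 MB to the trigger.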