github.com/miolini/go@v0.0.0-20160405192216-fca68c8cb408/src/runtime/mgcsweep.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: sweeping
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
    15  var sweep sweepdata
    16  
    17  // State of background sweep.
    18  type sweepdata struct {
    19  	lock    mutex
    20  	g       *g
    21  	parked  bool
    22  	started bool
    23  
    24  	spanidx uint32 // background sweeper position
    25  
    26  	nbgsweep    uint32
    27  	npausesweep uint32
    28  }
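        // nbgsweep counts spans swept by the background sweeper goroutine
        // (bgsweep below); npausesweep counts spans swept synchronously, as in
        // finishsweep_m below. Within this file both counters are only ever
        // incremented, never read.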
    29  
    30  //go:nowritebarrier
    31  func finishsweep_m(stw bool) {
    32  	// Sweeping must be complete before marking commences, so
    33  	// sweep any unswept spans. If this is a concurrent GC, there
    34  	// shouldn't be any spans left to sweep, so this should finish
    35  	// instantly. If GC was forced before the concurrent sweep
    36  	// finished, there may be spans to sweep.
    37  	for sweepone() != ^uintptr(0) {
    38  		sweep.npausesweep++
    39  	}
    40  
    41  	// There may be some other spans being swept concurrently that
    42  	// we need to wait for. If finishsweep_m is done with the world stopped,
    43  	// this is not required because the STW must have waited for sweeps.
    44  	//
    45  	// TODO(austin): As of this writing, we always pass true for stw.
    46  	// Consider removing this code.
    47  	if !stw {
    48  		sg := mheap_.sweepgen
    49  		for _, s := range work.spans {
    50  			if s.sweepgen != sg && s.state == _MSpanInUse {
    51  				s.ensureSwept()
    52  			}
    53  		}
    54  	}
    55  }
    56  
    57  func bgsweep(c chan int) {
    58  	sweep.g = getg()
    59  
    60  	lock(&sweep.lock)
    61  	sweep.parked = true
    62  	c <- 1
    63  	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
    64  
    65  	for {
    66  		for gosweepone() != ^uintptr(0) {
    67  			sweep.nbgsweep++
    68  			Gosched()
    69  		}
    70  		lock(&sweep.lock)
    71  		if !gosweepdone() {
    72  			// This can happen if a GC runs between
    73  			// gosweepone returning ^0 above
    74  			// and the lock being acquired.
    75  			unlock(&sweep.lock)
    76  			continue
    77  		}
    78  		sweep.parked = true
    79  		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
    80  	}
    81  }
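        // bgsweep is started once during runtime start-up; the send on c above
        // tells the caller that the sweeper has been installed and parked.
        // A minimal sketch of the expected wiring (gcenable in mgc.go; not part
        // of this file and possibly simplified):
        //
        //	func gcenable() {
        //		c := make(chan int, 1)
        //		go bgsweep(c)
        //		<-c // wait for bgsweep to register itself and park
        //		memstats.enablegc = true
        //	}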
    82  
    83  // sweepone sweeps one span and returns the number of pages returned
    84  // to the heap, or ^uintptr(0) if there was nothing to sweep.
    85  //go:nowritebarrier
    86  func sweepone() uintptr {
    87  	_g_ := getg()
    88  
    89  	// Increment locks to ensure that the goroutine is not preempted
    90  	// in the middle of the sweep, which would leave the span in an inconsistent state for the next GC.
    91  	_g_.m.locks++
    92  	sg := mheap_.sweepgen
    93  	for {
    94  		idx := atomic.Xadd(&sweep.spanidx, 1) - 1
    95  		if idx >= uint32(len(work.spans)) {
    96  			mheap_.sweepdone = 1
    97  			_g_.m.locks--
    98  			if debug.gcpacertrace > 0 && idx == uint32(len(work.spans)) {
    99  				print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
   100  			}
   101  			return ^uintptr(0)
   102  		}
   103  		s := work.spans[idx]
   104  		if s.state != mSpanInUse {
   105  			s.sweepgen = sg
   106  			continue
   107  		}
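        		// Sweep generation protocol (per the mspan.sweepgen convention
        		// documented in mheap.go):
        		//   s.sweepgen == sg-2: the span needs sweeping
        		//   s.sweepgen == sg-1: the span is being swept by someone
        		//   s.sweepgen == sg:   the span has been swept this cycle
        		// mheap_.sweepgen is incremented by 2 after every GC, so the CAS
        		// below claims an unswept span exclusively for this sweeper.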
   108  		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   109  			continue
   110  		}
   111  		npages := s.npages
   112  		if !s.sweep(false) {
   113  			npages = 0
   114  		}
   115  		_g_.m.locks--
   116  		return npages
   117  	}
   118  }
   119  
   120  //go:nowritebarrier
   121  func gosweepone() uintptr {
   122  	var ret uintptr
   123  	systemstack(func() {
   124  		ret = sweepone()
   125  	})
   126  	return ret
   127  }
   128  
   129  //go:nowritebarrier
   130  func gosweepdone() bool {
   131  	return mheap_.sweepdone != 0
   132  }
   133  
   134  // Returns only when span s has been swept.
   135  //go:nowritebarrier
   136  func (s *mspan) ensureSwept() {
   137  	// Caller must disable preemption.
   138  	// Otherwise, when this function returns, the span can become unswept again
   139  	// (if GC is triggered on another goroutine).
   140  	_g_ := getg()
   141  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
   142  		throw("MSpan_EnsureSwept: m is not locked")
   143  	}
   144  
   145  	sg := mheap_.sweepgen
   146  	if atomic.Load(&s.sweepgen) == sg {
   147  		return
   148  	}
   149  	// The caller must be sure that the span is an MSpanInUse span.
   150  	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   151  		s.sweep(false)
   152  		return
   153  	}
   154  	// Unfortunate case: the span is being swept by someone else and we have no efficient means to wait, so spin.
   155  	for atomic.Load(&s.sweepgen) != sg {
   156  		osyield()
   157  	}
   158  }
   159  
   160  // Sweep frees or collects finalizers for blocks not marked in the mark phase.
   161  // It clears the mark bits in preparation for the next GC round.
   162  // Returns true if the span was returned to the heap.
   163  // If preserve=true, don't return it to the heap or relink it in the MCentral lists;
   164  // the caller takes care of it.
   165  //TODO go:nowritebarrier
   166  func (s *mspan) sweep(preserve bool) bool {
   167  	// It's critical that we enter this function with preemption disabled;
   168  	// GC must not start while we are in the middle of this function.
   169  	_g_ := getg()
   170  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
   171  		throw("MSpan_Sweep: m is not locked")
   172  	}
   173  	sweepgen := mheap_.sweepgen
   174  	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
   175  		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   176  		throw("MSpan_Sweep: bad span state")
   177  	}
   178  
   179  	if trace.enabled {
   180  		traceGCSweepStart()
   181  	}
   182  
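        	// pagesSwept feeds the proportional sweep pacer: deductSweepCredit
        	// below compares it against the number of pages "owed" for span
        	// allocations so far.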
   183  	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
   184  
   185  	cl := s.sizeclass
   186  	size := s.elemsize
   187  	res := false
   188  	nfree := 0
   189  
   190  	var head, end gclinkptr
   191  
   192  	c := _g_.m.mcache
   193  	freeToHeap := false
   194  
   195  	// Mark any free objects in this span so we don't collect them.
   196  	sstart := uintptr(s.start << _PageShift)
   197  	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
   198  		if uintptr(link) < sstart || s.limit <= uintptr(link) {
   199  			// Free list is corrupted.
   200  			dumpFreeList(s)
   201  			throw("free list corrupted")
   202  		}
   203  		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
   204  	}
   205  
   206  	// Unlink & free special records for any objects we're about to free.
   207  	// Two complications here:
   208  	// 1. An object can have both finalizer and profile special records.
   209  	//    In such a case we need to queue the finalizer for execution,
   210  	//    mark the object as live, and preserve the profile special.
   211  	// 2. A tiny object can have several finalizers set up for different offsets.
   212  	//    If such an object is not marked, we need to queue all of its finalizers at once.
   213  	// Both 1 and 2 are possible at the same time.
   214  	specialp := &s.specials
   215  	special := *specialp
   216  	for special != nil {
   217  		// A finalizer can be set for an inner byte of an object; find the object's beginning.
   218  		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
   219  		hbits := heapBitsForAddr(p)
   220  		if !hbits.isMarked() {
   221  			// This object is not marked and has at least one special record.
   222  			// Pass 1: see if it has at least one finalizer.
   223  			hasFin := false
   224  			endOffset := p - uintptr(s.start<<_PageShift) + size
   225  			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
   226  				if tmp.kind == _KindSpecialFinalizer {
   227  					// Stop freeing the object if it has a finalizer.
   228  					hbits.setMarkedNonAtomic()
   229  					hasFin = true
   230  					break
   231  				}
   232  			}
   233  			// Pass 2: queue all finalizers _or_ handle profile record.
   234  			for special != nil && uintptr(special.offset) < endOffset {
   235  				// Find the exact byte for which the special was set up
   236  				// (as opposed to the object's beginning).
   237  				p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
   238  				if special.kind == _KindSpecialFinalizer || !hasFin {
   239  					// Splice out special record.
   240  					y := special
   241  					special = special.next
   242  					*specialp = special
   243  					freespecial(y, unsafe.Pointer(p), size)
   244  				} else {
   245  					// This is a profile record, but the object has finalizers (so it is kept alive).
   246  					// Keep the special record.
   247  					specialp = &special.next
   248  					special = *specialp
   249  				}
   250  			}
   251  		} else {
   252  			// The object is still live: keep its special record.
   253  			specialp = &special.next
   254  			special = *specialp
   255  		}
   256  	}
   257  
   258  	// Sweep through n objects of given size starting at p.
   259  	// This thread owns the span now, so it can manipulate
   260  	// the block bitmap without atomic operations.
   261  
   262  	size, n, _ := s.layout()
   263  	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
   264  		// At this point we know that we are looking at a garbage object
   265  		// that needs to be collected.
   266  		if debug.allocfreetrace != 0 {
   267  			tracefree(unsafe.Pointer(p), size)
   268  		}
   269  		if msanenabled {
   270  			msanfree(unsafe.Pointer(p), size)
   271  		}
   272  
   273  		// Reset to allocated+noscan.
   274  		if cl == 0 {
   275  			// Free large span.
   276  			if preserve {
   277  				throw("can't preserve large span")
   278  			}
   279  			s.needzero = 1
   280  
   281  			// Free the span after heapBitsSweepSpan
   282  			// returns, since it's not done with the span.
   283  			freeToHeap = true
   284  		} else {
   285  			// Free small object.
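        			// The first pointer-sized word of the object is reused as the
        			// free-list link (head/end below), which is presumably why the
        			// poison/zero store below starts at p+sys.PtrSize rather than p.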
   286  			if size > 2*sys.PtrSize {
   287  				*(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
   288  			} else if size > sys.PtrSize {
   289  				*(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = 0
   290  			}
   291  			if head.ptr() == nil {
   292  				head = gclinkptr(p)
   293  			} else {
   294  				end.ptr().next = gclinkptr(p)
   295  			}
   296  			end = gclinkptr(p)
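        			// Keep the list terminated with a recognizable bogus link,
        			// presumably so a stale pointer into the list is easy to spot;
        			// the whole list is handed to mcentral.freeSpan below.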
   297  			end.ptr().next = gclinkptr(0x0bade5)
   298  			nfree++
   299  		}
   300  	})
   301  
   302  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
   303  	// because of the potential for a concurrent free/SetFinalizer.
   304  	// But we need to set it before we make the span available for allocation
   305  	// (return it to the heap or mcentral), because allocation code assumes that a
   306  	// span is already swept if it is available for allocation.
   307  	if freeToHeap || nfree == 0 {
   308  		// The span must be in our exclusive ownership until we update sweepgen;
   309  		// check for potential races.
   310  		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
   311  			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   312  			throw("MSpan_Sweep: bad span state after sweep")
   313  		}
   314  		atomic.Store(&s.sweepgen, sweepgen)
   315  	}
   316  	if nfree > 0 {
   317  		c.local_nsmallfree[cl] += uintptr(nfree)
   318  		res = mheap_.central[cl].mcentral.freeSpan(s, int32(nfree), head, end, preserve)
   319  		// MCentral_FreeSpan updates sweepgen
   320  	} else if freeToHeap {
   321  		// Free large span to heap
   322  
   323  		// NOTE(rsc,dvyukov): The original implementation of efence
   324  		// in CL 22060046 used SysFree instead of SysFault, so that
   325  		// the operating system would eventually give the memory
   326  		// back to us again, so that an efence program could run
   327  		// longer without running out of memory. Unfortunately,
   328  		// calling SysFree here without any kind of adjustment of the
   329  		// heap data structures means that when the memory does
   330  		// come back to us, we have the wrong metadata for it, either in
   331  		// the MSpan structures or in the garbage collection bitmap.
   332  		// Using SysFault here means that the program will run out of
   333  		// memory fairly quickly in efence mode, but at least it won't
   334  		// have mysterious crashes due to confused memory reuse.
   335  		// It should be possible to switch back to SysFree if we also
   336  		// implement and then call some kind of MHeap_DeleteSpan.
   337  		if debug.efence > 0 {
   338  			s.limit = 0 // prevent mlookup from finding this span
   339  			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
   340  		} else {
   341  			mheap_.freeSpan(s, 1)
   342  		}
   343  		c.local_nlargefree++
   344  		c.local_largefree += size
   345  		res = true
   346  	}
   347  	if trace.enabled {
   348  		traceGCSweepDone()
   349  	}
   350  	return res
   351  }
   352  
   353  // deductSweepCredit deducts sweep credit for allocating a span of
   354  // size spanBytes. This must be performed *before* the span is
   355  // allocated to ensure the system has enough credit. If necessary, it
   356  // performs sweeping to prevent going into debt. If the caller will
   357  // also sweep pages (e.g., for a large allocation), it can pass a
   358  // non-zero callerSweepPages to leave that many pages unswept.
   359  //
   360  // deductSweepCredit makes a worst-case assumption that all spanBytes
   361  // bytes of the ultimately allocated span will be available for object
   362  // allocation. The caller should call reimburseSweepCredit if that
   363  // turns out not to be the case once the span is allocated.
   364  //
   365  // deductSweepCredit is the core of the "proportional sweep" system.
   366  // It uses statistics gathered by the garbage collector to perform
   367  // enough sweeping so that all pages are swept during the concurrent
   368  // sweep phase between GC cycles.
   369  //
   370  // mheap_ must NOT be locked.
   371  func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
   372  	if mheap_.sweepPagesPerByte == 0 {
   373  		// Proportional sweep is done or disabled.
   374  		return
   375  	}
   376  
   377  	// Account for this span allocation.
   378  	spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
   379  
   380  	// Fix debt if necessary.
   381  	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
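        	// For example, with illustrative numbers (not from a real run):
        	// sweepPagesPerByte = 0.002 and spanBytesAlloc = 64<<20 give
        	// pagesOwed = int64(0.002*67108864) = 134217. If pagesSwept is
        	// 134000 and callerSweepPages is 0, the loop below keeps sweeping
        	// spans until at least 217 more pages have been swept, or until
        	// sweeping finishes and sweepPagesPerByte is reset to 0.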
   382  	for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
   383  		if gosweepone() == ^uintptr(0) {
   384  			mheap_.sweepPagesPerByte = 0
   385  			break
   386  		}
   387  	}
   388  }
   389  
   390  // reimburseSweepCredit records that unusableBytes bytes of a
   391  // just-allocated span are not available for object allocation. This
   392  // offsets the worst-case charge performed by deductSweepCredit.
   393  func reimburseSweepCredit(unusableBytes uintptr) {
   394  	if mheap_.sweepPagesPerByte == 0 {
   395  		// Nobody cares about the credit. Avoid the atomic.
   396  		return
   397  	}
   398  	if int64(atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))) < 0 {
   399  		throw("spanBytesAlloc underflow")
   400  	}
   401  }
   402  
   403  func dumpFreeList(s *mspan) {
   404  	printlock()
   405  	print("runtime: free list of span ", s, ":\n")
   406  	sstart := uintptr(s.start << _PageShift)
   407  	link := s.freelist
   408  	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
   409  		if i != 0 {
   410  			print(" -> ")
   411  		}
   412  		print(hex(link))
   413  		if link.ptr() == nil {
   414  			break
   415  		}
   416  		if uintptr(link) < sstart || s.limit <= uintptr(link) {
   417  			// Bad link. Stop walking before we crash.
   418  			print(" (BAD)")
   419  			break
   420  		}
   421  		link = link.ptr().next
   422  	}
   423  	print("\n")
   424  	printunlock()
   425  }