github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/mcentral.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Central free lists.
     6  //
     7  // See malloc.go for an overview.
     8  //
     9  // The mcentral doesn't actually contain the list of free objects; the mspan does.
    10  // Each mcentral holds two pairs of spanSets of in-use mspans: spans that
    11  // still have free objects (partial) and spans that are fully allocated (full).
    12  
    13  package runtime
    14  
    15  import "runtime/internal/atomic"
    16  
    17  // Central list of free objects of a given size.
    18  //
    19  //go:notinheap
    20  type mcentral struct {
    21  	spanclass spanClass
    22  
    23  	// partial and full contain two mspan sets: one of swept in-use
    24  	// spans, and one of unswept in-use spans. These two trade
    25  	// roles on each GC cycle. The unswept set is drained either by
    26  	// allocation or by the background sweeper in every GC cycle,
    27  	// so only two roles are necessary.
    28  	//
    29  	// sweepgen is increased by 2 on each GC cycle, so the swept
    30  	// spans are in partial[sweepgen/2%2] and the unswept spans are in
    31  	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
    32  	// unswept set and pushes spans that are still in-use on the
    33  	// swept set. Likewise, allocating an in-use span pushes it
    34  	// on the swept set.
    35  	//
    36  	// Some parts of the sweeper can sweep arbitrary spans, and hence
    37  	// can't remove them from the unswept set, but will add the span
    38  	// to the appropriate swept list. As a result, the parts of the
    39  	// sweeper and mcentral that do consume from the unswept list may
    40  	// encounter swept spans, and these should be ignored.
    41  	partial [2]spanSet // list of spans with a free object
    42  	full    [2]spanSet // list of spans with no free objects
    43  }
    44  
    45  // Initialize a single central free list.
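        // It is called once per span class from mheap.init during heap setup
        // (see mheap.go), before any allocation can reach this central list.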
    46  func (c *mcentral) init(spc spanClass) {
    47  	c.spanclass = spc
    48  	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
    49  	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
    50  	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
    51  	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
    52  }
    53  
    54  // partialUnswept returns the spanSet which holds partially-filled
    55  // unswept spans for this sweepgen.
    56  func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
    57  	return &c.partial[1-sweepgen/2%2]
    58  }
    59  
    60  // partialSwept returns the spanSet which holds partially-filled
    61  // swept spans for this sweepgen.
    62  func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
    63  	return &c.partial[sweepgen/2%2]
    64  }
    65  
    66  // fullUnswept returns the spanSet which holds unswept spans without any
    67  // free slots for this sweepgen.
    68  func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
    69  	return &c.full[1-sweepgen/2%2]
    70  }
    71  
    72  // fullSwept returns the spanSet which holds swept spans without any
    73  // free slots for this sweepgen.
    74  func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
    75  	return &c.full[sweepgen/2%2]
    76  }
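
        // To make the index arithmetic above concrete: when mheap_.sweepgen is 4,
        // the swept sets are partial[0] and full[0] and the unswept sets are
        // partial[1] and full[1]; after the next cycle sweepgen is 6 and the
        // roles swap. A minimal sketch of selecting the sets for the current
        // cycle with the accessors above:
        //
        //	sg := mheap_.sweepgen
        //	swept, unswept := c.partialSwept(sg), c.partialUnswept(sg)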
    77  
    78  // Allocate a span to use in an mcache.
    79  func (c *mcentral) cacheSpan() *mspan {
    80  	// Deduct credit for this span allocation and sweep if necessary.
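        	// deductSweepCredit (mgcsweep.go) implements proportional sweeping:
        	// it may sweep other spans right now so that the sweeper stays ahead
        	// of allocation, and the credit must be charged before the span is
        	// actually taken from the heap.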
    81  	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
    82  	deductSweepCredit(spanBytes, 0)
    83  
    84  	sg := mheap_.sweepgen
    85  
    86  	traceDone := false
    87  	if trace.enabled {
    88  		traceGCSweepStart()
    89  	}
    90  
    91  	// If we sweep spanBudget spans without finding any free
    92  	// space, just allocate a fresh span. This limits the amount
    93  	// of time we can spend trying to find free space and
    94  	// amortizes the cost of small object sweeping over the
    95  	// benefit of having a full free span to allocate from. By
    96  	// setting this to 100, we limit the space overhead to 1%.
    97  	//
    98  	// TODO(austin,mknyszek): This still has bad worst-case
    99  	// throughput. For example, this could find just one free slot
   100  	// on the 100th swept span. That limits allocation latency, but
   101  	// still has very poor throughput. We could instead keep a
   102  	// running free-to-used budget and switch to fresh span
   103  	// allocation if the budget runs low.
   104  	spanBudget := 100
   105  
   106  	var s *mspan
   107  
   108  	// Try partial swept spans first.
   109  	if s = c.partialSwept(sg).pop(); s != nil {
   110  		goto havespan
   111  	}
   112  
   113  	// Now try partial unswept spans.
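        	// A span's sweepgen encodes its sweep state relative to the global
        	// mheap_.sweepgen (see the mspan comment in mheap.go): sg-2 means the
        	// span still needs sweeping, sg-1 means it is currently being swept,
        	// and sg means it is swept and ready to use. The CAS below therefore
        	// both claims the span and marks it as being swept.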
   114  	for ; spanBudget >= 0; spanBudget-- {
   115  		s = c.partialUnswept(sg).pop()
   116  		if s == nil {
   117  			break
   118  		}
   119  		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   120  			// We got ownership of the span, so let's sweep it and use it.
   121  			s.sweep(true)
   122  			goto havespan
   123  		}
   124  		// We failed to get ownership of the span, which means it's being or
   125  		// has been swept by an asynchronous sweeper that just couldn't remove it
   126  		// from the unswept list. That sweeper took ownership of the span and
   127  		// responsibility for either freeing it to the heap or putting it on the
   128  		// right swept list. Either way, we should just ignore it (and it's unsafe
   129  		// for us to do anything else).
   130  	}
   131  	// Now try full unswept spans, sweeping them and putting them into the
   132  	// right list if we fail to get a span.
   133  	for ; spanBudget >= 0; spanBudget-- {
   134  		s = c.fullUnswept(sg).pop()
   135  		if s == nil {
   136  			break
   137  		}
   138  		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   139  			// We got ownership of the span, so let's sweep it.
   140  			s.sweep(true)
   141  			// Check if there's any free space.
   142  			freeIndex := s.nextFreeIndex()
   143  			if freeIndex != s.nelems {
   144  				s.freeindex = freeIndex
   145  				goto havespan
   146  			}
   147  			// Add it to the swept list, because sweeping didn't give us any free space.
   148  			c.fullSwept(sg).push(s)
   149  		}
   150  		// See comment for partial unswept spans.
   151  	}
   152  	if trace.enabled {
   153  		traceGCSweepDone()
   154  		traceDone = true
   155  	}
   156  
   157  	// We failed to get a span from the mcentral so get one from mheap.
   158  	s = c.grow()
   159  	if s == nil {
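        		// The heap could not provide a span; the caller (see
        		// mcache.refill) treats a nil result as out of memory.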
   160  		return nil
   161  	}
   162  
   163  	// At this point s is a span that should have free slots.
   164  havespan:
   165  	if trace.enabled && !traceDone {
   166  		traceGCSweepDone()
   167  	}
   168  	n := int(s.nelems) - int(s.allocCount)
   169  	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
   170  		throw("span has no free objects")
   171  	}
   172  	freeByteBase := s.freeindex &^ (64 - 1)
   173  	whichByte := freeByteBase / 8
   174  	// Init alloc bits cache.
   175  	s.refillAllocCache(whichByte)
   176  
   177  	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
   178  	// s.allocCache.
   179  	s.allocCache >>= s.freeindex % 64
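        	// For example, if s.freeindex is 70: freeByteBase is 64 and whichByte
        	// is 8, so the cache was refilled from byte 8 (bit 64) of the
        	// allocation bitmap, and shifting right by 70%64 = 6 bits leaves bit 0
        	// of s.allocCache describing object index 70.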
   180  
   181  	return s
   182  }
   183  
   184  // Return span from an mcache.
   185  //
   186  // s must have a span class corresponding to this
   187  // mcentral and it must not be empty.
   188  func (c *mcentral) uncacheSpan(s *mspan) {
   189  	if s.allocCount == 0 {
   190  		throw("uncaching span but s.allocCount == 0")
   191  	}
   192  
   193  	sg := mheap_.sweepgen
   194  	stale := s.sweepgen == sg+1
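        	// When a span is handed to an mcache, mcache.refill sets its sweepgen
        	// to sweepgen+3, so a span returned within the same GC cycle arrives
        	// here with sweepgen sg+3. If a new cycle began while the span was
        	// cached, its sweepgen is only sg+1, which is the stale case handled
        	// below.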
   195  
   196  	// Fix up sweepgen.
   197  	if stale {
   198  		// Span was cached before sweep began. It's our
   199  		// responsibility to sweep it.
   200  		//
   201  		// Set sweepgen to indicate it's not cached but needs
   202  		// sweeping and can't be allocated from. sweep will
   203  		// set s.sweepgen to indicate s is swept.
   204  		atomic.Store(&s.sweepgen, sg-1)
   205  	} else {
   206  		// Indicate that s is no longer cached.
   207  		atomic.Store(&s.sweepgen, sg)
   208  	}
   209  
   210  	// Put the span in the appropriate place.
   211  	if stale {
   212  		// It's stale, so just sweep it. Sweeping will put it on
   213  		// the right list.
   214  		s.sweep(false)
   215  	} else {
   216  		if int(s.nelems)-int(s.allocCount) > 0 {
   217  			// Put it back on the partial swept list.
   218  			c.partialSwept(sg).push(s)
   219  		} else {
   220  			// There's no free space and it's not stale, so put it on the
   221  			// full swept list.
   222  			c.fullSwept(sg).push(s)
   223  		}
   224  	}
   225  }
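
        // A rough sketch of how the two halves fit together on the caller's side
        // (mcache.refill in mcache.go): the exhausted span is handed back with
        // uncacheSpan and a replacement for the same span class is taken with
        // cacheSpan. The names spc and old below are placeholders for the
        // mcache's span class and its current span.
        //
        //	c := &mheap_.central[spc].mcentral
        //	c.uncacheSpan(old) // return the used-up span
        //	s := c.cacheSpan() // get a span with free slots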
   226  
   227  // grow allocates a new empty span from the heap and initializes it for c's size class.
   228  func (c *mcentral) grow() *mspan {
   229  	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
   230  	size := uintptr(class_to_size[c.spanclass.sizeclass()])
   231  
   232  	s := mheap_.alloc(npages, c.spanclass, true)
   233  	if s == nil {
   234  		return nil
   235  	}
   236  
   237  	// Use division by multiplication and shifts to quickly compute:
   238  	// n := (npages << _PageShift) / size
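        	// divShift, divMul, and divShift2 are per-size-class constants from
        	// the divMagic table (class_to_divmagic in sizeclasses.go), chosen so
        	// that the shift-and-multiply below produces the exact quotient
        	// without a hardware divide.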
   239  	n := (npages << _PageShift) >> s.divShift * uintptr(s.divMul) >> s.divShift2
   240  	s.limit = s.base() + size*n
   241  	heapBitsForAddr(s.base()).initSpan(s)
   242  	return s
   243  }