github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral holds two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mSpanList // list of spans with a free object, i.e., a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
}
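
// Note: there is one mcentral per span size class; they are stored in the
// mheap_.central array (see mheap.go), padded so that the mcentrals for
// different size classes do not share a cache line.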

// Initialize a single central free list.
func (c *mcentral) init(sizeclass int32) {
	c.sizeclass = sizeclass
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
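	// Sweep generation states (as documented in mheap.go), relative to sg:
	//   sweepgen == sg-2: the span needs sweeping
	//   sweepgen == sg-1: the span is currently being swept
	//   sweepgen == sg:   the span is swept and ready to use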
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping; allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

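	// No nonempty span was usable. Unswept spans on the empty list were
	// fully allocated in an earlier cycle but may regain free objects
	// once swept, so try those next.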
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after the sweep;
			// it is already on the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span;
		// all subsequent ones must also be either swept or in the process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
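	// cap is the total number of object slots the span can hold;
	// n is how many of those slots are still free.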
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	usedBytes := uintptr(s.allocCount) * s.elemsize
	if usedBytes > 0 {
		reimburseSweepCredit(usedBytes)
	}
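	// Conservatively count all currently free bytes in the span as live;
	// uncacheSpan subtracts back whatever is still unallocated when the
	// span leaves the mcache.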
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
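	// refillAllocCache caches the allocation bits for the 64 object slots
	// starting at a 64-aligned index; allocBits is addressed in bytes, so
	// round freeindex down to a multiple of 64 and divide by 8 to find
	// the byte to start from.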
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return a span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
	}
	unlock(&c.lock)
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
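// wasempty indicates that s had no free objects before this sweep
// (so it is currently on c's empty list) and may need to move to nonempty.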
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
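	// The span has just been swept; freed slots contain stale data, so
	// its memory must be zeroed before it is reused.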
	s.needzero = 1

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above;
		// the span must be on the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// Delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above).
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

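	// n objects of this size class fit in the span. limit marks the end
	// of the last object slot (any tail of the span past it is unused),
	// and initSpan below sets up the span's heap bitmap.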
	p := s.base()
	s.limit = p + size*n

	heapBitsForSpan(s.base()).initSpan(s)
	return s
}