github.com/fjballest/golang@v0.0.0-20151209143359-e4c5fe594ca8/src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c.nonempty)
// and those that are completely allocated (c.empty).
package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mSpanList // list of spans with a free object
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
}
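
// A span cycles between these lists and the mcaches: cacheSpan moves a
// span with free objects from nonempty to empty and hands it to an mcache
// (s.incache = true); uncacheSpan and freeSpan relink it on nonempty once
// it has free objects again. Throughout this file, s.ref counts the
// allocated objects in a span, so cap-ref is its free-slot count.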

// Initialize a single central free list.
func (c *mcentral) init(sizeclass int32) {
	c.sizeclass = sizeclass
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	deductSweepCredit(uintptr(class_to_size[c.sizeclass]), 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
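	// Sweep-state encoding (documented with mheap_.sweepgen in mheap.go):
	// s.sweepgen == sg-2 means the span needs sweeping, sg-1 means it is
	// being swept, and sg means it is swept and ready to use; sweepgen
	// advances by 2 at the start of each GC cycle.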
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			if s.freelist.ptr() != nil {
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
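	// cap is the total number of object slots in the span; s.ref is the
	// number currently allocated, so n is the free count handed to the mcache.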
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	usedBytes := uintptr(s.ref) * s.elemsize
	if usedBytes > 0 {
		reimburseSweepCredit(usedBytes)
	}
	if s.freelist.ptr() == nil {
		throw("freelist empty")
	}
	s.incache = true
	return s
}
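
// In this snapshot, cacheSpan is reached from the mcache refill path when a
// per-P cache runs out of free objects for this size class (see mcache.go).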

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		throw("uncaching full span")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}
	unlock(&c.lock)
}
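
// uncacheSpan is the inverse of cacheSpan: the mcache gives the span back,
// typically when per-P caches are flushed (for example, around GC), and the
// span is relinked on nonempty if any of its objects are still free.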

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to the heap or relink it in
// MCentral lists; the caller takes care of it.
func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		throw("freespan into cached span")
	}

	// Add the objects back to s's free list.
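	// The sweeper delivers the freed objects as a ready-made chain from
	// start to end; splice it onto the front of the existing list and
	// drop ref by n, since those objects are free again.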
	wasempty := s.freelist.ptr() == nil
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// Delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	c.nonempty.remove(s)
	s.needzero = 1
	s.freelist = 0
	unlock(&c.lock)
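	// Reset the heap bitmap for the span's memory before the pages go
	// back to the heap (initSpan is defined in mbitmap.go).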
	heapBitsForSpan(s.base()).initSpan(s.layout())
	mheap_.freeSpan(s, 0)
	return true
}
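
// freeSpan is invoked from the span sweeper (mgcsweep.go) after it has
// collected a span's dead objects into a start..end chain.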

// Fetch a new span from the heap and carve into objects for the free list.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size
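	// For example, a size class with 1024-byte objects on a one-page
	// (8 KB) span yields n = 8192/1024 = 8 objects per span.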

	s := mheap_.alloc(npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
	head := gclinkptr(p)
	tail := gclinkptr(p)
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
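	// The loop leaves a chain head -> head+size -> ... -> head+(n-1)*size;
	// after a sanity check, terminate it and install it as the span's free list.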
	if s.freelist.ptr() != nil {
		throw("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}