github.com/aloncn/graphics-go@v0.0.1/src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral holds two lists of MSpans: those with free objects (c.nonempty)
// and those that are completely allocated (c.empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mSpanList // list of spans with a free object
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
}

// Initialize a single central free list.
func (c *mcentral) init(sizeclass int32) {
	c.sizeclass = sizeclass
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
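	// Sweep generation bookkeeping (see mheap_.sweepgen):
	//   s.sweepgen == sg-2: the span needs sweeping
	//   s.sweepgen == sg-1: the span is being swept by another goroutine
	//   s.sweepgen == sg:   the span has been swept and is ready to use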
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
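		// An unswept span: claim it by advancing sweepgen with a CAS,
		// move it to the empty list (it is about to be handed to an mcache),
		// and sweep it with the central lock released.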
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

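	// No nonempty span was available. Scan the empty list for spans that
	// still need sweeping: sweeping may free objects and make one usable.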
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			if s.freelist.ptr() != nil {
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
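	// cap is the total number of objects the span can hold;
	// n is how many of them are still free (s.ref counts allocated objects).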
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	usedBytes := uintptr(s.ref) * s.elemsize
	if usedBytes > 0 {
		reimburseSweepCredit(usedBytes)
	}
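	// Conservatively count the span's unallocated slots as live while the
	// span is cached in an mcache; uncacheSpan undoes this for any slots
	// that were never allocated.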
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	if s.freelist.ptr() == nil {
		throw("freelist empty")
	}
	s.incache = true
	return s
}

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		throw("uncaching full span")
	}

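	// If any objects are still free, move the span back to the nonempty list
	// so future cacheSpan calls can find it.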
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
	}
	unlock(&c.lock)
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to heap or relink it in MCentral lists;
// the caller takes care of it.
func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		throw("freespan into cached span")
	}

	// Add the objects back to s's free list.
	wasempty := s.freelist.ptr() == nil
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

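	// If some objects are still allocated, leave the span on the nonempty list.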
	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	c.nonempty.remove(s)
	s.needzero = 1
	s.freelist = 0
	unlock(&c.lock)
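	// Reinitialize the span's heap bitmap before handing its pages back to the heap.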
	heapBitsForSpan(s.base()).initSpan(s.layout())
	mheap_.freeSpan(s, 0)
	return true
}

// Fetch a new span from the heap and carve into objects for the free list.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

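	// Thread the new span's objects into a singly linked free list; each
	// object's first word holds the link to the next free object.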
	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
	head := gclinkptr(p)
	tail := gclinkptr(p)
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
	if s.freelist.ptr() != nil {
		throw("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}