github.com/shijuvar/go@v0.0.0-20141209052335-e8f13700b70c/src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
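//
// MCentral sits between the MCaches and the MHeap on the allocation path:
// an MCache obtains a span via mCentral_CacheSpan and returns it via
// mCentral_UncacheSpan, and the MCentral replenishes itself from the heap
// in mCentral_Grow.  Both lists are protected by c.lock.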

package runtime

import "unsafe"

// Initialize a single central free list.
func mCentral_Init(c *mcentral, sizeclass int32) {
	c.sizeclass = sizeclass
	mSpanList_Init(&c.nonempty)
	mSpanList_Init(&c.empty)
}
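// The heap creates one mcentral per size class; initialization in mheap.go
// does roughly this (a sketch, see mHeap_Init):
//
//	for i := range h.central {
//		mCentral_Init(&h.central[i].mcentral, int32(i))
//	}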

// Allocate a span to use in an MCache.
func mCentral_CacheSpan(c *mcentral) *mspan {
	lock(&c.lock)
	sg := mheap_.sweepgen
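	// Sweep generation encoding (see the sweepgen comment in mheap.go):
	//	sweepgen == sg-2: the span needs sweeping
	//	sweepgen == sg-1: the span is currently being swept
	//	sweepgen == sg:   the span has been swept and is ready to use
	// sg is incremented by 2 after every GC, so the cas from sg-2 to sg-1
	// below claims an unswept span so that this code sweeps it itself.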
retry:
	var s *mspan
	for s = c.nonempty.next; s != &c.nonempty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			mSpanList_Remove(s)
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		mSpanList_Remove(s)
		mSpanList_InsertBack(&c.empty, s)
		unlock(&c.lock)
		goto havespan
	}

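	// Nothing usable on the nonempty list.  Scan the empty list for spans
	// that still need sweeping: sweeping one may free objects that this
	// cache can use.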
	for s = c.empty.next; s != &c.empty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			mSpanList_Remove(s)
			// swept spans are at the end of the list
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			if s.freelist.ptr() != nil {
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = mCentral_Grow(c)
	if s == nil {
		return nil
	}
	lock(&c.lock)
	mSpanList_InsertBack(&c.empty, s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
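	// cap is the total number of object slots in the span and s.ref is the
	// number of objects already allocated from it, so n is how many free
	// objects the cache receives.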
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		gothrow("empty span")
	}
	if s.freelist.ptr() == nil {
		gothrow("freelist empty")
	}
	s.incache = true
	return s
}

// Return span from an MCache.
func mCentral_UncacheSpan(c *mcentral, s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		gothrow("uncaching full span")
	}

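	// If the span still has free objects (n > 0) it belongs on the nonempty
	// list; otherwise it is fully allocated and stays on the empty list,
	// where mCentral_CacheSpan left it.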
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n > 0 {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}
	unlock(&c.lock)
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		gothrow("freespan into cached span")
	}

	// Add the objects back to s's free list.
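	// start..end is already a chain of n objects linked together by the
	// sweeper, so it can be spliced onto the front of the old freelist as
	// a unit.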
	wasempty := s.freelist.ptr() == nil
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when this is called on behalf of
		// mCentral_CacheSpan above (via mSpan_Sweep);
		// the span must be in the empty list.
		if s.next == nil {
			gothrow("can't preserve unlinked span")
		}
		atomicstore(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}

	// delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomicstore(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	mSpanList_Remove(s)
	s.needzero = 1
	s.freelist = 0
	unlock(&c.lock)
	unmarkspan(uintptr(s.start)<<_PageShift, s.npages<<_PageShift)
	mHeap_Free(&mheap_, s, 0)
	return true
}

// Fetch a new span from the heap and carve into objects for the free list.
func mCentral_Grow(c *mcentral) *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size
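	// For example, a size class with 1024-byte objects backed by a
	// one-page (8KB) span gives n == 8 objects per span.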

	s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
	head := gclinkptr(p)
	tail := gclinkptr(p)
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
	if s.freelist.ptr() != nil {
		gothrow("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
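	// The span's objects now form a singly linked free list:
	// head, head+size, ..., head+(n-1)*size, terminated by the 0 link
	// stored in tail above.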
	markspan(unsafe.Pointer(uintptr(s.start)<<_PageShift), size, n, size*n < s.npages<<_PageShift)
	return s
}