github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/mcentral.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated or currently cached in an MCache
// (c->empty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
// so that it is faster to move those lists between MCaches and MCentrals.

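// Sweep generation, as used throughout this file (a sketch of the invariant
// documented on MSpan in malloc.h): with sg = runtime_mheap.sweepgen,
//	s->sweepgen == sg-2: the span needs sweeping;
//	s->sweepgen == sg-1: the span is being swept;
//	s->sweepgen == sg:   the span is swept and ready to use.
// A sweeper claims a span by CAS'ing s->sweepgen from sg-2 to sg-1.
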
#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static bool MCentral_Grow(MCentral *c);
static void MCentral_Free(MCentral *c, MLink *v);
static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);

// Initialize a single central free list.
void
runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
	c->sizeclass = sizeclass;
	runtime_MSpanList_Init(&c->nonempty);
	runtime_MSpanList_Init(&c->empty);
}

// Allocate a span to use in an MCache.
MSpan*
runtime_MCentral_CacheSpan(MCentral *c)
{
	MSpan *s;
	int32 cap, n;
	uint32 sg;

	runtime_lock(&c->lock);
	sg = runtime_mheap.sweepgen;
retry:
	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_unlock(&c->lock);
			runtime_MSpan_Sweep(s);
			runtime_lock(&c->lock);
			// the span could have been moved to heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		goto havespan;
	}

	for(s = c->empty.next; s != &c->empty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(&c->empty, s);
			runtime_unlock(&c->lock);
			runtime_MSpan_Sweep(s);
			runtime_lock(&c->lock);
			// the span could be moved to nonempty or heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}

	// Replenish central list if empty.
	if(!MCentral_Grow(c)) {
		runtime_unlock(&c->lock);
		return nil;
	}
	goto retry;

havespan:
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n == 0)
		runtime_throw("empty span");
	if(s->freelist == nil)
		runtime_throw("freelist empty");
	c->nfree -= n;
	runtime_MSpanList_Remove(s);
	runtime_MSpanList_InsertBack(&c->empty, s);
	s->incache = true;
	runtime_unlock(&c->lock);
	return s;
}
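
// Note: a span handed out to an MCache stays on c->empty and is flagged
// with s->incache; it comes back via runtime_MCentral_UncacheSpan below
// once the MCache is done with it.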

// Return a span from an MCache back to the central free list.
void
runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	MLink *v;
	int32 cap, n;

	runtime_lock(&c->lock);

	s->incache = false;

	// Move any explicitly freed items from the freebuf to the freelist.
	while((v = s->freebuf) != nil) {
		s->freebuf = v->next;
		runtime_markfreed(v);
		v->next = s->freelist;
		s->freelist = v;
		s->ref--;
	}

	if(s->ref == 0) {
		// Free back to heap.  Unlikely, but possible.
		MCentral_ReturnToHeap(c, s); // unlocks c
		return;
	}

	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		c->nfree += n;
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}
	runtime_unlock(&c->lock);
}
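
// Note: freelist vs freebuf. While a span is cached (s->incache), its
// freelist belongs to the owning MCache and is used unsynchronized, so
// concurrent frees routed through MCentral_Free below are buffered on
// s->freebuf under c->lock and merged into the freelist by
// runtime_MCentral_UncacheSpan above, when the span is returned.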

// Free the list of objects back into the central free list c.
// Called from runtime_free.
void
runtime_MCentral_FreeList(MCentral *c, MLink *start)
{
	MLink *next;

	runtime_lock(&c->lock);
	for(; start != nil; start = next) {
		next = start->next;
		MCentral_Free(c, start);
	}
	runtime_unlock(&c->lock);
}

// Helper: free one object back into the central free list.
// Caller must hold lock on c on entry.  Holds lock on exit.
static void
MCentral_Free(MCentral *c, MLink *v)
{
	MSpan *s;

	// Find span for v.
	s = runtime_MHeap_Lookup(&runtime_mheap, v);
	if(s == nil || s->ref == 0)
		runtime_throw("invalid free");
	if(s->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("free into unswept span");

	// If the span is currently being used unsynchronized by an MCache,
	// we can't modify the freelist.  Add to the freebuf instead.  The
	// items will get moved to the freelist when the span is returned
	// by the MCache.
	if(s->incache) {
		v->next = s->freebuf;
		s->freebuf = v;
		return;
	}

	// Move span to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the object to the span's free list.
	runtime_markfreed(v);
	v->next = s->freelist;
	s->freelist = v;
	s->ref--;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		MCentral_ReturnToHeap(c, s); // unlocks c
		runtime_lock(&c->lock);
	}
}
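
// Note: when the span is returned to the heap, MCentral_ReturnToHeap drops
// c->lock, so MCentral_Free reacquires it to preserve its caller-holds-lock
// contract on exit.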

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to the heap.  Sets sweepgen to
// the latest generation.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime_throw("freespan into cached span");
	runtime_lock(&c->lock);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// Delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after taking the
	// lock of c above).  The atomic store pairs with the CAS on
	// s->sweepgen in runtime_MCentral_CacheSpan.
	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

	if(s->ref != 0) {
		runtime_unlock(&c->lock);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}

// Compute the object size, pages per span, and objects per span
// for the given size class.
void
runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
{
	int32 size;
	int32 npages;

	npages = runtime_class_to_allocnpages[sizeclass];
	size = runtime_class_to_size[sizeclass];
	*npagesp = npages;
	*sizep = size;
	*nobj = (npages << PageShift) / size;
}
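
// As a worked example (assuming the usual PageShift of 12, i.e. 4096-byte
// pages, and a size class of 48 bytes spanning one page):
// nobj = 4096/48 = 85 objects per span, leaving 4096 - 85*48 = 16 bytes
// of unused tail space.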

// Fetch a new span from the heap and
// carve it into objects for the free list.
static bool
MCentral_Grow(MCentral *c)
{
	int32 i, n, npages;
	uintptr size;
	MLink **tailp, *v;
	byte *p;
	MSpan *s;

	runtime_unlock(&c->lock);
	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
	if(s == nil) {
		// TODO(rsc): Log out of memory
		runtime_lock(&c->lock);
		return false;
	}

	// Carve span into sequence of blocks.
	tailp = &s->freelist;
	p = (byte*)(s->start << PageShift);
	s->limit = p + size*n;
	for(i=0; i<n; i++) {
		v = (MLink*)p;
		*tailp = v;
		tailp = &v->next;
		p += size;
	}
	*tailp = nil;
	// The last argument tells markspan whether the span has leftover
	// (unused) space past the final block.
	runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));

	runtime_lock(&c->lock);
	c->nfree += n;
	runtime_MSpanList_Insert(&c->nonempty, s);
	return true;
}
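
// After carving, the span's blocks form a singly linked list threaded
// through the first word of each block (MLink.next):
//
//	s->freelist -> [block 0] -> [block 1] -> ... -> [block n-1] -> nil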

// Return s to the heap.  s must be unused (s->ref == 0).  Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime_throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(&c->lock);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
}