github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/mcache.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample      int32   // trigger heap sample after allocating this many bytes
	local_cachealloc uintptr // bytes allocated from cache since last lock of heap
	local_scan       uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
	tinyoffset       uintptr // offset of the next free byte in the current tiny block
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.
	alloc [_NumSizeClasses]*mspan // spans to allocate from

	stackcache [_NumStackOrders]stackfreelist

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
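
// The fast path that the tiny fields above enable looks roughly like the
// sketch below. This is an illustrative sketch, not part of the original
// file: the real logic lives in mallocgc in malloc.go, and tinyAllocSketch
// is a hypothetical name. It bump-allocates out of the current tiny block
// and reports failure when the object does not fit.
func tinyAllocSketch(c *mcache, size, align uintptr) unsafe.Pointer {
	// Round tinyoffset up to the alignment the object needs
	// (align is assumed to be a power of two).
	off := (c.tinyoffset + align - 1) &^ (align - 1)
	if c.tiny != 0 && off+size <= _TinySize {
		// The object fits in the current tiny block: bump and return.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + size
		c.local_tinyallocs++
		return x
	}
	// No current block or no room: the caller must start a new tiny block.
	return nil
}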

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
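
// An illustrative sketch, not part of the original file: walking a free
// list through gclinkptr. The *gclink returned by ptr() is used only
// transiently for field access and is never stored, per the rule above;
// stackcacheCountSketch is a hypothetical name.
func stackcacheCountSketch(fl stackfreelist) int {
	n := 0
	for p := fl.list; p != 0; p = p.ptr().next {
		n++
	}
	return n
}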

// dummy mspan that contains no free objects.
var emptymspan mspan

// allocmcache allocates a new, zeroed mcache from the heap's fixed-size
// allocator and points every size class at the empty span, so the first
// malloc in each class falls through to refill.
func allocmcache() *mcache {
	lock(&mheap_.lock)
	c := (*mcache)(mheap_.cachealloc.alloc())
	unlock(&mheap_.lock)
	memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
	for i := 0; i < _NumSizeClasses; i++ {
		c.alloc[i] = &emptymspan
	}
	c.next_sample = nextSample()
	return c
}

// freemcache returns all of c's cached spans and stacks to the heap,
// flushes its local stats, and frees the mcache itself.
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// refill gets a span with at least one free object from the central
// lists, assigns it as the cached span for the given sizeclass, and
// returns it. The previously cached span must be empty.
func (c *mcache) refill(sizeclass int32) *mspan {
	_g_ := getg()

	_g_.m.locks++
	// Return the current cached span to the central lists.
	s := c.alloc[sizeclass]
	if s.freelist.ptr() != nil {
		throw("refill on a nonempty span")
	}
	if s != &emptymspan {
		s.incache = false
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[sizeclass].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}
	if s.freelist.ptr() == nil {
		println(s.ref, (s.npages<<_PageShift)/s.elemsize)
		throw("empty span")
	}
	c.alloc[sizeclass] = s
	_g_.m.locks--
	return s
}
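
// An illustrative sketch, not part of the original file, of the caller
// side in mallocgc (malloc.go): when the cached span's free list is
// empty, switch to the system stack and refill, then pop one object.
// allocFromSpanSketch is a hypothetical name.
func allocFromSpanSketch(c *mcache, sizeclass int32) gclinkptr {
	s := c.alloc[sizeclass]
	v := s.freelist
	if v.ptr() == nil {
		// Cached span is exhausted; replace it with a fresh one.
		systemstack(func() {
			s = c.refill(sizeclass)
		})
		v = s.freelist
	}
	// Pop the first free object off the span's free list.
	s.freelist = v.ptr().next
	s.ref++
	return v
}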

// releaseAll returns all of c's cached spans to the central lists and
// drops the tiny allocator block, so the GC no longer has to treat
// c.tiny as a hidden heap pointer.
func (c *mcache) releaseAll() {
	for i := 0; i < _NumSizeClasses; i++ {
		s := c.alloc[i]
		if s != &emptymspan {
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0
}
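
// An illustrative sketch, not part of the original file, of how the GC
// drives releaseAll across every P during mark termination, in the
// spirit of flushallmcaches in mheap.go; flushAllMcachesSketch is a
// hypothetical name.
func flushAllMcachesSketch() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		if c := p.mcache; c != nil {
			c.releaseAll()
			stackcache_clear(c)
		}
	}
}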