github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/runtime/mcache.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample int32   // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
	tinyoffset       uintptr // offset into the current tiny block (reset with tiny in releaseAll)
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	// Cached free stacks, one list per stack size order.
	stackcache [_NumStackOrders]stackfreelist

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
    47  
// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr // next node in the list; 0 terminates the list
}
    57  
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
// Being a uintptr, it is not traced and takes no write barriers.
type gclinkptr uintptr
    61  
    62  // ptr returns the *gclink form of p.
    63  // The result should be used for accessing fields, not stored
    64  // in other data structures.
    65  func (p gclinkptr) ptr() *gclink {
    66  	return (*gclink)(unsafe.Pointer(p))
    67  }
    68  
// stackfreelist is a per-size-order cache of free stack spans.
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
    73  
// emptymspan is a dummy mspan that contains no free objects.
// mcache.alloc slots point at it when they hold no real span, so a
// slot is never nil and the first allocation forces a refill.
var emptymspan mspan
    76  
    77  func allocmcache() *mcache {
    78  	lock(&mheap_.lock)
    79  	c := (*mcache)(mheap_.cachealloc.alloc())
    80  	unlock(&mheap_.lock)
    81  	for i := range c.alloc {
    82  		c.alloc[i] = &emptymspan
    83  	}
    84  	c.next_sample = nextSample()
    85  	return c
    86  }
    87  
// freemcache releases all resources cached in c and returns c's memory
// to the heap's cache allocator. Runs on the system stack because it
// manipulates the heap and stack caches.
func freemcache(c *mcache) {
	systemstack(func() {
		// Return cached spans to their mcentrals and free cached
		// stacks before the mcache itself is recycled.
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		// Flush c's stats into the global state before the struct
		// is handed back to the allocator for reuse.
		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
   104  
   105  // Gets a span that has a free object in it and assigns it
   106  // to be the cached span for the given sizeclass. Returns this span.
   107  func (c *mcache) refill(spc spanClass) *mspan {
   108  	_g_ := getg()
   109  
   110  	_g_.m.locks++
   111  	// Return the current cached span to the central lists.
   112  	s := c.alloc[spc]
   113  
   114  	if uintptr(s.allocCount) != s.nelems {
   115  		throw("refill of span with free space remaining")
   116  	}
   117  
   118  	if s != &emptymspan {
   119  		s.incache = false
   120  	}
   121  
   122  	// Get a new cached span from the central lists.
   123  	s = mheap_.central[spc].mcentral.cacheSpan()
   124  	if s == nil {
   125  		throw("out of memory")
   126  	}
   127  
   128  	if uintptr(s.allocCount) == s.nelems {
   129  		throw("span has no free space")
   130  	}
   131  
   132  	c.alloc[spc] = s
   133  	_g_.m.locks--
   134  	return s
   135  }
   136  
   137  func (c *mcache) releaseAll() {
   138  	for i := range c.alloc {
   139  		s := c.alloc[i]
   140  		if s != &emptymspan {
   141  			mheap_.central[i].mcentral.uncacheSpan(s)
   142  			c.alloc[i] = &emptymspan
   143  		}
   144  	}
   145  	// Clear tinyalloc pool.
   146  	c.tiny = 0
   147  	c.tinyoffset = 0
   148  }