// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample int32   // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
	tinyoffset       uintptr
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// Local allocator stats, flushed during GC.
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

// stackfreelist is a per-size-class cache of free stack segments,
// one entry per stack order in mcache.stackcache.
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy MSpan that contains no free objects.
var emptymspan mspan

// allocmcache allocates and initializes a new mcache from the
// mheap's fixed-size allocator. Every alloc slot starts out as
// &emptymspan, so the first allocation of each span class goes
// through refill.
func allocmcache() *mcache {
	lock(&mheap_.lock)
	c := (*mcache)(mheap_.cachealloc.alloc())
	// The new cache holds no stale spans, so it counts as flushed
	// for the current sweep generation.
	c.flushGen = mheap_.sweepgen
	unlock(&mheap_.lock)
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.next_sample = nextSample()
	return c
}

// freemcache returns all of c's cached resources (spans, stack
// caches, local stats) to the global pools and frees c itself back
// to mheap_.cachealloc. Runs on the system stack so the release
// cannot be preempted mid-flush.
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		// sweepgen == mheap_.sweepgen+3 is the "cached, was swept"
		// state; anything else means the cache protocol was violated.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	c.alloc[spc] = s
}

// releaseAll returns every cached span to its mcentral and resets
// the tiny allocator, leaving c empty (all alloc slots back to
// &emptymspan).
func (c *mcache) releaseAll() {
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return // already flushed for this sweep phase
	} else if c.flushGen != sg-2 {
		// flushGen may only be the current sweepgen or the previous
		// one (sweepgen advances by 2 per cycle); anything else
		// means a flush was missed.
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}