// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample uintptr // trigger heap sample after allocating this many bytes
	scanAlloc  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

// stackfreelist is a per-stack-order cache of free stack segments
// (one entry of mcache.stackcache per _NumStackOrders size class).
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
var emptymspan mspan

// allocmcache allocates a new mcache from mheap_.cachealloc (non-GC'd
// memory) under the heap lock, stamps it with the current sweepgen, and
// points every span class at emptymspan so the first allocation from
// each class goes through refill.
func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen = mheap_.sweepgen
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}

// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// In some cases there is no way to simply release
// resources, such as statistics, so donate them to
// a different mcache (the recipient).
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if we're not bootstrapping or we don't have a P. The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache() *mcache {
	// Grab the mcache, since that's where stats live.
	pp := getg().m.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
	memstats.heapStats.release()

	// Update heap_live with the same assumption.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))

	// Flush tinyAllocs.
	if spc == tinySpanClass {
		atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
		c.tinyAllocs = 0
	}

	// While we're here, flush scanAlloc, since we have to call
	// revise anyway.
	atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
	c.scanAlloc = 0

	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live and heap_scan changed.
		gcController.revise()
	}

	c.alloc[spc] = s
}

// allocLarge allocates a span for a large object.
// size is the object size in bytes; needzero and noscan are passed
// through to the heap allocator / span class.
func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
	// Overflow check: rounding size up to a page boundary must not wrap.
	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc, needzero)
	if s == nil {
		throw("out of memory")
	}
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
	atomic.Xadduintptr(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Update heap_live and revise pacing if needed.
	atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
	if trace.enabled {
		// Trace that a heap alloc occurred because heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
	s.limit = s.base() + size
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}

// releaseAll returns every cached span to its mcentral, clears the tiny
// allocator block, and flushes the per-P allocation statistics
// (scanAlloc, tinyAllocs, and the heap_live over-count made by refill)
// into the global memstats.
func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			// Adjust nsmallalloc in case the span wasn't fully allocated.
			n := uintptr(s.nelems) - uintptr(s.allocCount)
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
			memstats.heapStats.release()
			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in heap_live.
				// Undo this.
				//
				// If this span was cached before sweep, then
				// heap_live was totally recomputed since
				// caching this span, so we don't do this for
				// stale spans.
				atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
			}
			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0
	atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
	c.tinyAllocs = 0

	// Updated heap_scan and possible heap_live.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return
	} else if c.flushGen != sg-2 {
		// sweepgen advances by 2 per GC cycle, so any value other than
		// sg (current) or sg-2 (exactly one cycle behind) is corruption.
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}