github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two sets of mspans: those with free objects (c.partial)
// and those that are completely allocated (c.full).

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
)

// Central list of free objects of a given size.
type mcentral struct {
	_         sys.NotInHeap
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
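// Illustrative note on the indexing above (a worked example of the
// formulas, not part of the original file): with sweepgen == 4,
// sweepgen/2%2 == 0, so the swept sets are partial[0]/full[0] and the
// unswept sets are partial[1]/full[1]. When sweepgen advances to 6 on
// the next cycle, sweepgen/2%2 == 1 and the indices flip, so the spans
// left in partial[0]/full[0] become the unswept sets. No spans are
// moved when sweepgen advances; only the interpretation of the two
// slots changes, which is how the sets trade roles for free each cycle.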
// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if traceEnabled() {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	if traceEnabled() {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if traceEnabled() && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
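// Illustrative note on the allocCache refill at the end of cacheSpan
// (a worked example, not part of the original file): if s.freeindex is
// 70, then freeByteBase = 70 &^ 63 = 64 and whichByte = 64/8 = 8, so
// the cache is refilled from byte 8 of the span's allocation bitmap,
// the 64-bit word covering objects 64..127. The shift by 70%64 == 6
// then discards the bits for objects 64..69, leaving bit 0 of
// s.allocCache describing the object at index 70, as required.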
// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	s.initHeapBits(false)
	return s
}
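// Illustrative note on the math in grow (a worked example, not part of
// the original file; the class values are taken from the runtime's size
// class table): for the 48-byte size class, class_to_allocnpages is 1
// and class_to_size is 48, so a new span covers one 8KiB page and
// n = 8192/48 = 170 objects. s.limit then lands 48*170 = 8160 bytes
// past s.base(), leaving 32 bytes of tail waste that this span can
// never hand out.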