github.com/jonasi/go@v0.0.0-20150930005915-e78e654c1de0/src/runtime/mgcsweep.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import "unsafe"

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	spanidx uint32 // background sweeper position

	nbgsweep    uint32
	npausesweep uint32
}

//go:nowritebarrier
func finishsweep_m() {
	// The world is stopped so we should be able to complete the sweeps
	// quickly.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped
	// this code is not required.
	sg := mheap_.sweepgen
	for _, s := range work.spans {
		if s.sweepgen != sg && s.state == _MSpanInUse {
			mSpan_EnsureSwept(s)
		}
	}
}

func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

// sweepone sweeps one span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there is nothing to sweep.
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()

	// increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != mSpanInUse {
			s.sweepgen = sg
			continue
		}
		if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages := s.npages
		if !mSpan_Sweep(s, false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}

//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

// Returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomicload(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is a MSpanInUse span.
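	// Sweep generation protocol (see the sweepgen comment on mspan in
	// mheap.go): sweepgen == h.sweepgen-2 means the span needs sweeping,
	// sweepgen == h.sweepgen-1 means it is currently being swept, and
	// sweepgen == h.sweepgen means it is already swept. The CAS below
	// claims the span for sweeping on this goroutine.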
	if cas(&s.sweepgen, sg-2, sg-1) {
		mSpan_Sweep(s, false)
		return
	}
	// Unfortunate case: someone else is sweeping the span and we have no
	// efficient way to wait for it to finish, so spin.
	for atomicload(&s.sweepgen) != sg {
		osyield()
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return it to the heap or relink it in MCentral lists;
// the caller takes care of it.
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepStart()
	}

	xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0

	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}

	// Unlink & free special records for any objects we're about to free.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// Find the exact byte for which the special was setup
			// (as opposed to object beginning).
			p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
			// about to free object: splice out special record
			y := special
			special = special.next
			*specialp = special
			if !freespecial(y, unsafe.Pointer(p), size, false) {
				// stop freeing of object if it has a finalizer
				hbits.setMarkedNonAtomic()
			}
		} else {
			// object is still live: keep special record
			specialp = &special.next
			special = *specialp
		}
	}

	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.

	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage
		// object that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			s.needzero = 1

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
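			// (A size class of 0 means this span holds a single large
			// object, so once the bitmap sweep finishes the whole span
			// is handed back to the heap rather than threaded onto a
			// free list.)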
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
			}
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
			end.ptr().next = gclinkptr(0x0bade5)
			nfree++
		}
	})

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfree == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomicstore(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mHeap_Free(&mheap_, s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if trace.enabled {
		traceGCSweepDone()
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
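//
// Concretely, the loop below keeps sweeping until roughly
//
//	mheap_.pagesSwept >= mheap_.sweepPagesPerByte*mheap_.spanBytesAlloc - callerSweepPages
//
// holds (or there is nothing left to sweep, in which case the ratio is
// cleared), so that sweeping keeps pace with span allocation.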
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomicload64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}

func dumpFreeList(s *mspan) {
	printlock()
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		if i != 0 {
			print(" -> ")
		}
		print(hex(link))
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			print(" (BAD)")
			break
		}
		link = link.ptr().next
	}
	print("\n")
	printunlock()
}