github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/runtime/mheap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// minPhysPageSize is a lower-bound on the physical page size. The
// true physical page size may be larger than this. In contrast,
// sys.PhysPageSize is an upper-bound on the physical page size.
const minPhysPageSize = 4096

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
//
//go:notinheap
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
	freelarge mTreap                   // free treap of length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large spans of given length
	busylarge mSpanList                // busy lists of large spans length >= _MaxMHeapList
	sweepgen  uint32                   // sweep generation, see comment in mspan
	sweepdone uint32                   // all spans are swept
	sweepers  uint32                   // number of active sweepone calls

	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	//
	// The memory for allspans is manually managed and can be
	// reallocated and moved as the heap grows.
	//
	// In general, allspans is protected by mheap_.lock, which
	// prevents concurrent access as well as freeing the backing
	// store. Accesses during STW might not hold the lock, but
	// must ensure that allocation cannot happen around the
	// access (since that may free the backing store).
	allspans []*mspan // all spans out there

	// spans is a lookup table to map virtual address page IDs to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself.
	// Internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	//
	// Modifications are protected by mheap.lock. Reads can be
	// performed without locking, but ONLY from indexes that are
	// known to contain in-use or stack spans. This means there
	// must not be a safe-point between establishing that an
	// address is live and looking it up in the spans array.
	//
	// This is backed by a reserved region of the address space so
	// it can grow without moving. The memory up to len(spans) is
	// mapped. cap(spans) indicates the total reserved memory.
	spans []*mspan

	// sweepSpans contains two mspan stacks: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. Since the sweepgen increases by 2
	// on each cycle, this means the swept spans are in
	// sweepSpans[sweepgen/2%2] and the unswept spans are in
	// sweepSpans[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept stack and pushes spans that are still in-use on the
	// swept stack. Likewise, allocating an in-use span pushes it
	// on the swept stack.
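	//
	// For example (illustrative, not in the original source): when
	// sweepgen == 4, swept in-use spans are pushed on sweepSpans[0]
	// (4/2%2 == 0) and the unswept spans from the previous cycle sit
	// in sweepSpans[1]; after the next GC bumps sweepgen to 6, the
	// two stacks swap roles.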
	sweepSpans [2]gcSweepBuf

	_ uint32 // align uint64 fields on 32-bit for atomics

	// Proportional sweep
	//
	// These parameters represent a linear function from heap_live
	// to page sweep count. The proportional sweep system works to
	// stay in the black by keeping the current page sweep count
	// above this line at the current heap_live.
	//
	// The line has slope sweepPagesPerByte and passes through a
	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
	// any given time, the system is at (memstats.heap_live,
	// pagesSwept) in this space.
	//
	// It's important that the line pass through a point we
	// control rather than simply starting at a (0,0) origin
	// because that lets us adjust sweep pacing at any time while
	// accounting for current progress. If we could only adjust
	// the slope, it would create a discontinuity in debt if any
	// progress has already been made.
	pagesInUse         uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	pagesSwept         uint64  // pages swept this cycle; updated atomically
	pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largealloc  uint64                  // bytes allocated for large objects
	nlargealloc uint64                  // number of large object allocations
	largefree   uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree  uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap        uintptr // Points to one byte past the end of the bitmap
	bitmap_mapped uintptr

	// The arena_* fields indicate the addresses of the Go heap.
	//
	// The maximum range of the Go heap is
	// [arena_start, arena_start+_MaxMem+1).
	//
	// The range of the current Go heap is
	// [arena_start, arena_used). Parts of this range may not be
	// mapped, but the metadata structures are always mapped for
	// the full range.
	arena_start uintptr
	arena_used  uintptr // Set with setArenaUsed.

	// The heap is grown using a linear allocator that allocates
	// from the block [arena_alloc, arena_end). arena_alloc is
	// often, but *not always* equal to arena_used.
	arena_alloc uintptr
	arena_end   uintptr

	// arena_reserved indicates that the memory [arena_alloc,
	// arena_end) is reserved (e.g., mapped PROT_NONE). If this is
	// false, we have to be careful not to clobber existing
	// mappings here. If this is true, then we own the mapping
	// here and *must* clobber it to use it.
	arena_reserved bool

	_ uint32 // ensure 64-bit alignment

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
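	//
	// Note (illustrative, not in the original source): the pad array
	// below rounds each entry up to the next multiple of
	// sys.CacheLineSize. For a hypothetical 64-byte cache line and a
	// 200-byte mcentral, the pad would be 56 bytes, so each entry
	// occupies 256 bytes and no two mcentral.lock fields share a line.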
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	treapalloc            fixalloc // allocator for treapNodes* used by large objects
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.

	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}

var mheap_ mheap

// An MSpan is a run of pages.
//
// When an MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When an MSpan is allocated, state == MSpanInUse or MSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanManual, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or manual during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or manual to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   manual or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
type mSpanState uint8

const (
	_MSpanDead   mSpanState = iota
	_MSpanInUse             // allocated for garbage collected heap
	_MSpanManual            // allocated for manual management (e.g., stack allocator)
	_MSpanFree
)

// mSpanStateNames are the names of the span states, indexed by
// mSpanState.
var mSpanStateNames = []string{
	"_MSpanDead",
	"_MSpanInUse",
	"_MSpanManual",
	"_MSpanFree",
}

// mSpanList heads a linked list of spans.
//
//go:notinheap
type mSpanList struct {
	first *mspan // first span in list, or nil if none
	last  *mspan // last span in list, or nil if none
}

//go:notinheap
type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // For debugging. TODO: Remove.

	startAddr uintptr // address of first byte of span aka s.base()
	npages    uintptr // number of pages in span

	manualFreeList gclinkptr // list of free objects in _MSpanManual spans

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelem, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelem are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uintptr
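
	// Worked example (illustrative, not in the original source): for
	// object n = 10, the relevant allocation bit is
	//
	//	allocBits[10/8] & (1 << (10 % 8)) // byte 1, mask 0x04
	//
	// and, provided 10 >= freeindex, a zero bit there means slot 10
	// is still free.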

	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uintptr // number of objects in the span.

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are three arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *gcBits
	gcmarkBits *gcBits

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint16     // for divide by elemsize - divMagic.mul
	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
	allocCount  uint16     // number of allocated objects
	spanclass   spanClass  // size class and noscan (uint8)
	incache     bool       // being used by an mcache
	state       mSpanState // mspaninuse etc
	needzero    uint8      // needs to be zeroed before allocation
	divShift    uint8      // for divide by elemsize - divMagic.shift
	divShift2   uint8      // for divide by elemsize - divMagic.shift2
	elemsize    uintptr    // computed from sizeclass or from npages
	unusedsince int64      // first time spotted by gc in mspanfree state
	npreleased  uintptr    // number of pages released to the os
	limit       uintptr    // end of data in span
	speciallock mutex      // guards specials list
	specials    *special   // linked list of special records sorted by offset.
}
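
// Illustrative note (not in the original source): with the sweepgen
// scheme above, if h.sweepgen is currently 6 then a span whose own
// sweepgen is 4 still needs sweeping, 5 means it is being swept right
// now, and 6 means it has been swept and is ready to use.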

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		h.allspans = new
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = append(h.allspans, s)
}

// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage
// collector.
type spanClass uint8

const (
	numSpanClasses = _NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

func (sc spanClass) noscan() bool {
	return sc&1 != 0
}

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into _MSpanManual spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() {
		return false
	}
	switch s.state {
	case mSpanInUse, _MSpanManual:
		return b < s.limit
	default:
		return false
	}
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.
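
// Illustrative note (not in the original source): inheap, inHeapOrStack,
// spanOf, and spanOfUnchecked all rely on the same page-indexed lookup,
//
//	mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
//
// inheap additionally requires the span to be in use by the GC'd heap,
// while inHeapOrStack also accepts manually-managed (stack) spans.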

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := s.base()
	if s.spanclass.sizeclass() == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (v - p) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}

// Initialize the heap.
func (h *mheap) init(spansStart, spansBytes uintptr) {
	h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	//
	// This is safe because mspan contains no heap pointers.
	h.spanalloc.zero = false

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	sp := (*slice)(unsafe.Pointer(&h.spans))
	sp.array = unsafe.Pointer(spansStart)
	sp.len = 0
	sp.cap = int(spansBytes / sys.PtrSize)

	// Map metadata structures. But don't map race detector memory
	// since we're not actually growing the arena here (and TSAN
	// gets mad if you map 0 bytes).
	h.setArenaUsed(h.arena_used, false)
}

// setArenaUsed extends the usable arena to address arena_used and
// maps auxiliary VM regions for any newly usable arena space.
//
// racemap indicates that this memory should be managed by the race
// detector. racemap should be true unless this is covering a VM hole.
func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
	// Map auxiliary structures *before* h.arena_used is updated.
	// Waiting to update arena_used until after the memory has been mapped
	// avoids faults when other threads try to access these regions immediately
	// after observing the change to arena_used.

	// Map the bitmap.
	h.mapBits(arena_used)

	// Map spans array.
	h.mapSpans(arena_used)

	// Tell the race detector about the new heap memory.
	if racemap && raceenabled {
		racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
	}

	h.arena_used = arena_used
}
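
// Illustrative note (not in the original source): the spans array needs
// one *mspan entry per heap page, so mapSpans below sizes it as roughly
//
//	(arena_used - arena_start) / _PageSize * sys.PtrSize
//
// bytes, rounded up to a physical page before mapping.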

// mapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// Don't call this directly. Call mheap.setArenaUsed.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, physPageSize)
	need := n / unsafe.Sizeof(h.spans[0])
	have := uintptr(len(h.spans))
	if have >= need {
		return
	}
	h.spans = h.spans[:need]
	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
}

// Sweeps spans in list until it reclaims at least npages pages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s) // Puts it back on a busy list. s is not in the treap at this point.
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		if trace.enabled {
			traceGCSweepStart()
		}
		h.reclaim(npage)
		if trace.enabled {
			traceGCSweepDone()
		}
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
		s.state = _MSpanInUse
		s.allocCount = 0
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			mheap_.largealloc += uint64(s.elemsize)
			mheap_.nlargealloc++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.busy)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h.spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h.spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h.spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h.spans orders the reads.
	unlock(&h.lock)
	return s
}

func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, spanclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

// allocManual allocates a manually-managed span of npage pages.
// allocManual returns nil if allocation fails.
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field.
// Unlike allocations in the GC'd heap, the
// allocation does *not* count toward heap_inuse or heap_sys.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack to prevent stack
// growth. Since this is used by the stack allocator, stack growth
// during allocManual would self-deadlock.
//
//go:systemstack
func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
	lock(&h.lock)
	s := h.allocSpanLocked(npage, stat)
	if s != nil {
		s.state = _MSpanManual
		s.manualFreeList = 0
		s.allocCount = 0
		s.spanclass = 0
		s.nelems = 0
		s.elemsize = 0
		s.limit = s.base() + s.npages<<_PageShift
		// Manually managed memory doesn't count toward heap_sys.
		memstats.heap_sys -= uint64(s.npages << _PageShift)
	}

	// This unlock acts as a release barrier. See mheap.alloc_m.
	unlock(&h.lock)

	return s
}

// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			list.remove(s)
			goto HaveSpan
		}
	}
	// Best fit in list of large spans.
	s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.base()+npage<<_PageShift, s.npages-npage)
		s.npages = npage
		p := (t.base() - h.arena_start) >> _PageShift
		if p > 0 {
			h.spans[p-1] = s
		}
		h.spans[p] = t
		h.spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanManual // prevent coalescing with s
		t.state = _MSpanManual
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := (s.base() - h.arena_start) >> _PageShift
	for n := uintptr(0); n < npage; n++ {
		h.spans[p+n] = s
	}

	*stat += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}
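
// Illustrative note (not in the original source): when allocSpanLocked
// finds a span larger than requested, it trims it. For example, a
// request for 3 pages satisfied from an 8-page free span returns a
// 3-page span and re-frees the remaining 5-page tail via
// freeSpanLocked, updating the spans lookup table for both halves.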

// Large spans have a minimum size of 1MByte. The maximum number of large spans to support
// 1TBytes is 1 million; experimentation using random sizes indicates that the depth of
// the tree is less than 2x that of a perfectly balanced tree. 1TByte can be referenced
// by a perfectly balanced tree with a depth of 20. Twice that is an acceptable 40.
func (h *mheap) isLargeSpan(npages uintptr) bool {
	return npages >= uintptr(len(h.free))
}

// allocLarge allocates a span of at least npage pages from the treap of large spans.
// Returns nil if no such span currently exists.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	// Search treap for smallest span with >= npage pages.
	return h.freelarge.remove(npage)
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(uintptr(v), ask>>_PageShift)
	p := (s.base() - h.arena_start) >> _PageShift
	for i := p; i < p+s.npages; i++ {
		h.spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h.spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

// freeManual frees a manually-managed span returned by allocManual.
// stat must be the same as the stat passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack to prevent stack
// growth, just like allocManual.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, stat *uint64) {
	s.needzero = 1
	lock(&h.lock)
	*stat -= uint64(s.npages << _PageShift)
	memstats.heap_sys += uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, false, true, 0)
	unlock(&h.lock)
}

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanManual:
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := (s.base() - h.arena_start) >> _PageShift
	if p > 0 {
		before := h.spans[p-1]
		if before != nil && before.state == _MSpanFree {
			// Now adjust s.
			s.startAddr = before.startAddr
			s.npages += before.npages
			s.npreleased = before.npreleased // absorb released pages
			s.needzero |= before.needzero
			p -= before.npages
			h.spans[p] = s
			// The size is potentially changing so the treap needs to delete adjacent nodes and
			// insert back as a combined node.
			if h.isLargeSpan(before.npages) {
				// before is large, so it must be in the treap, which lets us remove it.
				h.freelarge.removeSpan(before)
			} else {
				h.freeList(before.npages).remove(before)
			}
			before.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(before))
		}
	}

	// Now check to see if next (greater addresses) span is free and can be coalesced.
	if (p + s.npages) < uintptr(len(h.spans)) {
		after := h.spans[p+s.npages]
		if after != nil && after.state == _MSpanFree {
			s.npages += after.npages
			s.npreleased += after.npreleased
			s.needzero |= after.needzero
			h.spans[p+s.npages-1] = s
			if h.isLargeSpan(after.npages) {
				h.freelarge.removeSpan(after)
			} else {
				h.freeList(after.npages).remove(after)
			}
			after.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(after))
		}
	}

	// Insert s into appropriate list or treap.
	if h.isLargeSpan(s.npages) {
		h.freelarge.insert(s)
	} else {
		h.freeList(s.npages).insert(s)
	}
}
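
// Illustrative note (not in the original source): coalescing in
// freeSpanLocked means that freeing a 2-page span whose lower neighbor
// is a free 3-page span and whose upper neighbor is a free 4-page span
// produces a single 9-page free span, which is then inserted into
// h.freelarge or the appropriate h.free list depending on its size.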

func (h *mheap) freeList(npages uintptr) *mSpanList {
	return &h.free[npages]
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.busy)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}

func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
	s := t.spanKey
	var sumreleased uintptr
	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in
			// physPageSize blocks, so round start
			// and end in. (Otherwise, madvise
			// will round them *out* and release
			// more memory than we want.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a
				// whole physical page.
				return sumreleased
			}
		}
		len := end - start
		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			return sumreleased
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
			continue
		}
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in
			// physPageSize blocks, so round start
			// and end in. (Otherwise, madvise
			// will round them *out* and release
			// more memory than we want.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a
				// whole physical page.
				continue
			}
		}
		len := end - start

		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			continue
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	// Disallow malloc or panic while holding the heap lock. We do
	// this here because this is a non-mallocgc entry point to
	// the mheap API.
	gp := getg()
	gp.m.mallocing++
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengetreap(h.freelarge.treap, now, limit)
	unlock(&h.lock)
	gp.m.mallocing--

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	// span is *not* zeroed.
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.spanclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
}

func (span *mspan) inList() bool {
	return span.list != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = nil
}

func (list *mSpanList) remove(span *mspan) {
	if span.list != list {
		print("runtime: failed MSpanList_Remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
		throw("MSpanList_Remove")
	}
	if list.first == span {
		list.first = span.next
	} else {
		span.prev.next = span.next
	}
	if list.last == span {
		list.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		// The list contains at least one span; link it in.
		// The last span in the list doesn't change.
		list.first.prev = span
	} else {
		// The list contains no spans, so this is also the last span.
		list.last = span
	}
	list.first = span
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.prev = list.last
	if list.last != nil {
		// The list contains at least one span.
		list.last.next = span
	} else {
		// The list contains no spans, so this is also the first span.
		list.first = span
	}
	list.last = span
	span.list = list
}

// takeAll removes all spans from other and inserts them at the front
// of list.
func (list *mSpanList) takeAll(other *mSpanList) {
	if other.isEmpty() {
		return
	}

	// Reparent everything in other to list.
	for s := other.first; s != nil; s = s.next {
		s.list = list
	}

	// Concatenate the lists.
	if list.isEmpty() {
		*list = *other
	} else {
		// Neither list is empty. Put other before list.
		other.last.next = list.first
		list.first.prev = other.last
		list.first = other.first
	}

	other.first, other.last = nil, nil
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

//go:notinheap
type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
//
//go:notinheap
type specialfinalizer struct {
	special special
	fn      *funcval // May be a heap pointer.
	nret    uintptr
	fint    *_type   // May be a heap pointer, but always live.
	ot      *ptrtype // May be a heap pointer, but always live.
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
//
//go:notinheap
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
//
//go:notinheap
type gcBits uint8

// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
	return addb((*uint8)(b), n)
}

// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

//go:notinheap
type gcBitsArena struct {
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte; read/write atomically
	next *gcBitsArena
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBitsArena
	next     *gcBitsArena // Read atomically. Write atomically under lock.
	current  *gcBitsArena
	previous *gcBitsArena
}

// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8

	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}

	// There's not enough room in the head arena. We may need to
	// allocate a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed. Now
	// that we hold the lock, the list head can't change, but its
	// free position still can.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may
	// have put a fresh arena on the "next" list. Try allocating
	// from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		// TODO: Mark it "already zeroed"
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}

	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))

	unlock(&gcBitsArenas.lock)
	return p
}

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans that are not being initialized, the
// mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *gcBits {
	return newMarkBits(nelems)
}

// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle which is demarcated by the call to finishsweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
// where the GC will mark objects and after each span is swept these bits
// will be used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
// gcAllocBits live until all the spans have been swept during this GC cycle.
// The span's sweep extinguishes all the references to gcBitsArenas.previous
// by pointing gcAllocBits into the gcBitsArenas.current.
// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}
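
// Illustrative note (not in the original source): the rotation above
// can be read as
//
//	previous -> free
//	current  -> previous
//	next     -> current
//	next     =  nil (a new arena is allocated lazily by newMarkBits)
//
// so a span's gcmarkBits always live in the arena that will back its
// allocBits after the next sweep.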

// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArenas.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8-byte aligned, adjust the index so
	// that &result.bits[result.free] is 8-byte aligned.
	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}