github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/runtime/mheap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// minPhysPageSize is a lower-bound on the physical page size. The
// true physical page size may be larger than this. In contrast,
// sys.PhysPageSize is an upper-bound on the physical page size.
const minPhysPageSize = 4096

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
//
//go:notinheap
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
	freelarge mTreap                   // free treap of length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large spans of given length
	busylarge mSpanList                // busy lists of large spans length >= _MaxMHeapList
	sweepgen  uint32                   // sweep generation, see comment in mspan
	sweepdone uint32                   // all spans are swept
	sweepers  uint32                   // number of active sweepone calls

	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	//
	// The memory for allspans is manually managed and can be
	// reallocated and move as the heap grows.
	//
	// In general, allspans is protected by mheap_.lock, which
	// prevents concurrent access as well as freeing the backing
	// store. Accesses during STW might not hold the lock, but
	// must ensure that allocation cannot happen around the
	// access (since that may free the backing store).
	allspans []*mspan // all spans out there

	// spans is a lookup table to map virtual address page IDs to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself.
	// Internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	//
	// Modifications are protected by mheap.lock. Reads can be
	// performed without locking, but ONLY from indexes that are
	// known to contain in-use or stack spans. This means there
	// must not be a safe-point between establishing that an
	// address is live and looking it up in the spans array.
	//
	// This is backed by a reserved region of the address space so
	// it can grow without moving. The memory up to len(spans) is
	// mapped. cap(spans) indicates the total reserved memory.
	spans []*mspan

	// sweepSpans contains two mspan stacks: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. Since the sweepgen increases by 2
	// on each cycle, this means the swept spans are in
	// sweepSpans[sweepgen/2%2] and the unswept spans are in
	// sweepSpans[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept stack and pushes spans that are still in-use on the
	// swept stack. Likewise, allocating an in-use span pushes it
	// on the swept stack.
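	// For example, when sweepgen is 4, swept in-use spans live in
	// sweepSpans[0] (4/2%2 == 0) and unswept spans in sweepSpans[1];
	// once the next cycle bumps sweepgen to 6, the two stacks swap roles.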
	sweepSpans [2]gcSweepBuf

	_ uint32 // align uint64 fields on 32-bit for atomics

	// Proportional sweep
	//
	// These parameters represent a linear function from heap_live
	// to page sweep count. The proportional sweep system works to
	// stay in the black by keeping the current page sweep count
	// above this line at the current heap_live.
	//
	// The line has slope sweepPagesPerByte and passes through a
	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
	// any given time, the system is at (memstats.heap_live,
	// pagesSwept) in this space.
	//
	// It's important that the line pass through a point we
	// control rather than simply starting at a (0,0) origin
	// because that lets us adjust sweep pacing at any time while
	// accounting for current progress. If we could only adjust
	// the slope, it would create a discontinuity in debt if any
	// progress has already been made.
	pagesInUse         uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	pagesSwept         uint64  // pages swept this cycle; updated atomically
	pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largealloc  uint64                  // bytes allocated for large objects
	nlargealloc uint64                  // number of large object allocations
	largefree   uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree  uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap        uintptr // Points to one byte past the end of the bitmap
	bitmap_mapped uintptr

	// The arena_* fields indicate the addresses of the Go heap.
	//
	// The maximum range of the Go heap is
	// [arena_start, arena_start+_MaxMem+1).
	//
	// The range of the current Go heap is
	// [arena_start, arena_used). Parts of this range may not be
	// mapped, but the metadata structures are always mapped for
	// the full range.
	arena_start uintptr
	arena_used  uintptr // Set with setArenaUsed.

	// The heap is grown using a linear allocator that allocates
	// from the block [arena_alloc, arena_end). arena_alloc is
	// often, but *not always*, equal to arena_used.
	arena_alloc uintptr
	arena_end   uintptr

	// arena_reserved indicates that the memory [arena_alloc,
	// arena_end) is reserved (e.g., mapped PROT_NONE). If this is
	// false, we have to be careful not to clobber existing
	// mappings here. If this is true, then we own the mapping
	// here and *must* clobber it to use it.
	arena_reserved bool

	_ uint32 // ensure 64-bit alignment

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
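	// Each size class gets two entries here: a scan spanClass and a
	// noscan spanClass (see spanClass below), so noscan objects never
	// share an mcentral with objects the GC has to scan.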
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	treapalloc            fixalloc // allocator for treapNodes* used by large objects
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.
}

var mheap_ mheap

// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse or MSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanManual, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or manual during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or manual to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   manual or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
type mSpanState uint8

const (
	_MSpanDead   mSpanState = iota
	_MSpanInUse             // allocated for garbage collected heap
	_MSpanManual            // allocated for manual management (e.g., stack allocator)
	_MSpanFree
)

// mSpanStateNames are the names of the span states, indexed by
// mSpanState.
var mSpanStateNames = []string{
	"_MSpanDead",
	"_MSpanInUse",
	"_MSpanManual",
	"_MSpanFree",
}

// mSpanList heads a linked list of spans.
//
//go:notinheap
type mSpanList struct {
	first *mspan // first span in list, or nil if none
	last  *mspan // last span in list, or nil if none
}

//go:notinheap
type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // For debugging. TODO: Remove.

	startAddr uintptr // address of first byte of span aka s.base()
	npages    uintptr // number of pages in span

	manualFreeList gclinkptr // list of free objects in _MSpanManual spans

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelem, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelem are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
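	// For example, in a span with elemsize 48 whose base address is
	// 0x1000, object 3 starts at 0x1000 + 3*48 = 0x1090.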
	freeindex uintptr
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uintptr // number of objects in the span.

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are three arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *gcBits
	gcmarkBits *gcBits

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint16     // for divide by elemsize - divMagic.mul
	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
	allocCount  uint16     // number of allocated objects
	spanclass   spanClass  // size class and noscan (uint8)
	incache     bool       // being used by an mcache
	state       mSpanState // mspaninuse etc
	needzero    uint8      // needs to be zeroed before allocation
	divShift    uint8      // for divide by elemsize - divMagic.shift
	divShift2   uint8      // for divide by elemsize - divMagic.shift2
	elemsize    uintptr    // computed from sizeclass or from npages
	unusedsince int64      // first time spotted by gc in mspanfree state
	npreleased  uintptr    // number of pages released to the os
	limit       uintptr    // end of data in span
	speciallock mutex      // guards specials list
	specials    *special   // linked list of special records sorted by offset.
}

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		h.allspans = new
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = append(h.allspans, s)
}

// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage
// collector.
type spanClass uint8

const (
	numSpanClasses = _NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

func (sc spanClass) noscan() bool {
	return sc&1 != 0
}

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into _MSpanManual spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() {
		return false
	}
	switch s.state {
	case mSpanInUse, _MSpanManual:
		return b < s.limit
	default:
		return false
	}
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
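//
// For example, with 8 KB pages (_PageShift == 13) the lookup is
// mheap_.spans[(p-mheap_.arena_start)>>13], so any two pointers that
// fall in the same page resolve to the same spans entry.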
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := s.base()
	if s.spanclass.sizeclass() == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (v - p) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}

// Initialize the heap.
func (h *mheap) init(spansStart, spansBytes uintptr) {
	h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	//
	// This is safe because mspan contains no heap pointers.
	h.spanalloc.zero = false

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	sp := (*slice)(unsafe.Pointer(&h.spans))
	sp.array = unsafe.Pointer(spansStart)
	sp.len = 0
	sp.cap = int(spansBytes / sys.PtrSize)

	// Map metadata structures. But don't map race detector memory
	// since we're not actually growing the arena here (and TSAN
	// gets mad if you map 0 bytes).
	h.setArenaUsed(h.arena_used, false)
}

// setArenaUsed extends the usable arena to address arena_used and
// maps auxiliary VM regions for any newly usable arena space.
//
// racemap indicates that this memory should be managed by the race
// detector. racemap should be true unless this is covering a VM hole.
func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
	// Map auxiliary structures *before* h.arena_used is updated.
	// Waiting to update arena_used until after the memory has been mapped
	// avoids faults when other threads try to access these regions immediately
	// after observing the change to arena_used.

	// Map the bitmap.
	h.mapBits(arena_used)

	// Map spans array.
	h.mapSpans(arena_used)

	// Tell the race detector about the new heap memory.
	if racemap && raceenabled {
		racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
	}

	h.arena_used = arena_used
}

// mapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// Don't call this directly. Call mheap.setArenaUsed.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, physPageSize)
	need := n / unsafe.Sizeof(h.spans[0])
	have := uintptr(len(h.spans))
	if have >= need {
		return
	}
	h.spans = h.spans[:need]
	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
}

// Sweeps spans in list until it reclaims at least npages pages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s) // Puts it back on a busy list. s is not in the treap at this point.
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
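	// For example, before an 8-page allocation, reclaim(8) below sweeps
	// busy spans (larger-object lists first) until at least 8 pages have
	// been reclaimed, falling back to sweepone if those lists come up short.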
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		if trace.enabled {
			traceGCSweepStart()
		}
		h.reclaim(npage)
		if trace.enabled {
			traceGCSweepDone()
		}
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
		s.state = _MSpanInUse
		s.allocCount = 0
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			mheap_.largealloc += uint64(s.elemsize)
			mheap_.nlargealloc++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.busy)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h.spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h.spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h.spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h.spans orders the reads.
	unlock(&h.lock)
	return s
}

func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, spanclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

// allocManual allocates a manually-managed span of npage pages.
// allocManual returns nil if allocation fails.
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field.
// Unlike allocations in the GC'd heap, the allocation does *not*
// count toward heap_inuse or heap_sys.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack to prevent stack
// growth. Since this is used by the stack allocator, stack growth
// during allocManual would self-deadlock.
//
//go:systemstack
func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
	lock(&h.lock)
	s := h.allocSpanLocked(npage, stat)
	if s != nil {
		s.state = _MSpanManual
		s.manualFreeList = 0
		s.allocCount = 0
		s.spanclass = 0
		s.nelems = 0
		s.elemsize = 0
		s.limit = s.base() + s.npages<<_PageShift
		// Manually managed memory doesn't count toward heap_sys.
		memstats.heap_sys -= uint64(s.npages << _PageShift)
	}

	// This unlock acts as a release barrier. See mheap.alloc_m.
	unlock(&h.lock)

	return s
}

// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			list.remove(s)
			goto HaveSpan
		}
	}
	// Best fit in list of large spans.
	s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.base()+npage<<_PageShift, s.npages-npage)
		s.npages = npage
		p := (t.base() - h.arena_start) >> _PageShift
		if p > 0 {
			h.spans[p-1] = s
		}
		h.spans[p] = t
		h.spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanManual // prevent coalescing with s
		t.state = _MSpanManual
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := (s.base() - h.arena_start) >> _PageShift
	for n := uintptr(0); n < npage; n++ {
		h.spans[p+n] = s
	}

	*stat += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}

// Large spans have a minimum size of 1MByte. The maximum number of large spans
// needed to support 1TByte is 1 million. Experimentation using random sizes
// indicates that the depth of the tree is less than 2x that of a perfectly
// balanced tree: 1TByte can be referenced by a perfectly balanced tree with a
// depth of 20, and twice that is an acceptable 40.
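// isLargeSpan reports whether a span of npages pages is too large for the
// fixed-size free lists and therefore, when free, lives in the freelarge
// treap; with 8 KB pages and _MaxMHeapList == 128 that is the 1 MB
// threshold mentioned above.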
func (h *mheap) isLargeSpan(npages uintptr) bool {
	return npages >= uintptr(len(h.free))
}

// allocLarge allocates a span of at least npage pages from the treap of large spans.
// Returns nil if no such span currently exists.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	// Search treap for smallest span with >= npage pages.
	return h.freelarge.remove(npage)
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(uintptr(v), ask>>_PageShift)
	p := (s.base() - h.arena_start) >> _PageShift
	for i := p; i < p+s.npages; i++ {
		h.spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h.spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

// freeManual frees a manually-managed span returned by allocManual.
// stat must be the same as the stat passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack to prevent stack
// growth, just like allocManual.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, stat *uint64) {
	s.needzero = 1
	lock(&h.lock)
	*stat -= uint64(s.npages << _PageShift)
	memstats.heap_sys += uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, false, true, 0)
	unlock(&h.lock)
}

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanManual:
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := (s.base() - h.arena_start) >> _PageShift
	if p > 0 {
		before := h.spans[p-1]
		if before != nil && before.state == _MSpanFree {
			// Now adjust s.
			s.startAddr = before.startAddr
			s.npages += before.npages
			s.npreleased = before.npreleased // absorb released pages
			s.needzero |= before.needzero
			p -= before.npages
			h.spans[p] = s
			// The size is potentially changing so the treap needs to delete adjacent nodes and
			// insert back as a combined node.
			if h.isLargeSpan(before.npages) {
				// before is large, so it has to be in the treap; remove it from there.
				h.freelarge.removeSpan(before)
			} else {
				h.freeList(before.npages).remove(before)
			}
			before.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(before))
		}
	}

	// Now check to see if next (greater addresses) span is free and can be coalesced.
	if (p + s.npages) < uintptr(len(h.spans)) {
		after := h.spans[p+s.npages]
		if after != nil && after.state == _MSpanFree {
			s.npages += after.npages
			s.npreleased += after.npreleased
			s.needzero |= after.needzero
			h.spans[p+s.npages-1] = s
			if h.isLargeSpan(after.npages) {
				h.freelarge.removeSpan(after)
			} else {
				h.freeList(after.npages).remove(after)
			}
			after.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(after))
		}
	}

	// Insert s into appropriate list or treap.
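	// Free spans smaller than _MaxMHeapList pages go back on the fixed-size
	// h.free lists; anything at least that large goes into the freelarge treap.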
	if h.isLargeSpan(s.npages) {
		h.freelarge.insert(s)
	} else {
		h.freeList(s.npages).insert(s)
	}
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	return &h.free[npages]
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.busy)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}

func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
	s := t.spanKey
	var sumreleased uintptr
	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in
			// physPageSize blocks, so round start
			// and end in. (Otherwise, madvise
			// will round them *out* and release
			// more memory than we want.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a
				// whole physical page.
				return sumreleased
			}
		}
		len := end - start
		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			return sumreleased
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
			continue
		}
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in
			// physPageSize blocks, so round start
			// and end in. (Otherwise, madvise
			// will round them *out* and release
			// more memory than we want.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a
				// whole physical page.
				continue
			}
		}
		len := end - start

		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			continue
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	// Disallow malloc or panic while holding the heap lock. We do
	// this here because this is a non-mallocgc entry point to
	// the mheap API.
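	// Note that debug.freeOSMemory (below) calls scavenge(-1, ^uint64(0), 0),
	// which treats every unused span as past the limit and so releases
	// everything that has not already been returned to the OS.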
	gp := getg()
	gp.m.mallocing++
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengetreap(h.freelarge.treap, now, limit)
	unlock(&h.lock)
	gp.m.mallocing--

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	// span is *not* zeroed.
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.spanclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
}

func (span *mspan) inList() bool {
	return span.list != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = nil
}

func (list *mSpanList) remove(span *mspan) {
	if span.list != list {
		print("runtime: failed MSpanList_Remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
		throw("MSpanList_Remove")
	}
	if list.first == span {
		list.first = span.next
	} else {
		span.prev.next = span.next
	}
	if list.last == span {
		list.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		// The list contains at least one span; link it in.
		// The last span in the list doesn't change.
		list.first.prev = span
	} else {
		// The list contains no spans, so this is also the last span.
		list.last = span
	}
	list.first = span
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.prev = list.last
	if list.last != nil {
		// The list contains at least one span.
		list.last.next = span
	} else {
		// The list contains no spans, so this is also the first span.
		list.first = span
	}
	list.last = span
	span.list = list
}

// takeAll removes all spans from other and inserts them at the front
// of list.
func (list *mSpanList) takeAll(other *mSpanList) {
	if other.isEmpty() {
		return
	}

	// Reparent everything in other to list.
	for s := other.first; s != nil; s = s.next {
		s.list = list
	}

	// Concatenate the lists.
	if list.isEmpty() {
		*list = *other
	} else {
		// Neither list is empty. Put other before list.
		other.last.next = list.first
		list.first.prev = other.last
		list.first = other.first
	}

	other.first, other.last = nil, nil
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

//go:notinheap
type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
//
//go:notinheap
type specialfinalizer struct {
	special special
	fn      *funcval // May be a heap pointer.
	nret    uintptr
	fint    *_type   // May be a heap pointer, but always live.
	ot      *ptrtype // May be a heap pointer, but always live.
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
//
//go:notinheap
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
//
//go:notinheap
type gcBits uint8

// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
	return addb((*uint8)(b), n)
}

// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

//go:notinheap
type gcBitsArena struct {
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte; read/write atomically
	next *gcBitsArena
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBitsArena
	next     *gcBitsArena // Read atomically. Write atomically under lock.
	current  *gcBitsArena
	previous *gcBitsArena
}

// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8

	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}

	// There's not enough room in the head arena. We may need to
	// allocate a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed. Now
	// that we hold the lock, the list head can't change, but its
	// free position still can.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may
	// have put a fresh arena on the "next" list. Try allocating
	// from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		// TODO: Mark it "already zeroed"
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}

	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))

	unlock(&gcBitsArenas.lock)
	return p
}

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans'
// allocation bits. For spans that are not being initialized, the
// mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *gcBits {
	return newMarkBits(nelems)
}

// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle, which is demarcated by the call to finishsweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
// where the GC will mark objects; after each span is swept, these bits
// are used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous, where the span's
// gcAllocBits live until all the spans have been swept during this GC cycle.
// The span's sweep extinguishes all the references to gcBitsArenas.previous
// by pointing gcAllocBits into the gcBitsArenas.current.
// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}

// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArenas.lock. This may temporarily release it.
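// Each arena is gcBitsChunkBytes (64 KB); at one mark bit per object that
// is roughly half a million objects' worth of bits, minus the header.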
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}