github.com/aloncn/graphics-go@v0.0.1/src/runtime/mheap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mSpanList                // free lists length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
	allspans  **mspan                  // all spans out there
	gcspans   **mspan                  // copy of allspans referenced by gc marker or sweeper
	nspan     uint32
	sweepgen  uint32 // sweep generation, see comment in mspan
	sweepdone uint32 // all spans are swept
	// span lookup
	spans        **mspan
	spans_mapped uintptr

	// Proportional sweep
	pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
	pagesSwept        uint64  // pages swept this cycle; updated atomically
	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap         uintptr
	bitmap_mapped  uintptr
	arena_start    uintptr
	arena_used     uintptr // always mHeap_Map{Bits,Spans} before updating
	arena_end      uintptr
	arena_reserved bool

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	central [_NumSizeClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.
}

var mheap_ mheap

// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or stack to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   stack or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
const (
	_MSpanInUse = iota // allocated for garbage collected heap
	_MSpanStack        // allocated for use by stack allocator
	_MSpanFree
	_MSpanDead
)

// mSpanList heads a linked list of spans.
//
// Linked list structure is based on BSD's "tail queue" data structure.
type mSpanList struct {
	first *mspan  // first span in list, or nil if none
	last  **mspan // last span's next field, or first if none
}

type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev **mspan    // previous span's next field, or list head's first field if none
	list *mSpanList // For debugging. TODO: Remove.

	start    pageID    // starting page number
	npages   uintptr   // number of pages in span
	freelist gclinkptr // list of free objects
	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint32   // for divide by elemsize - divMagic.mul
	ref         uint16   // capacity - number of objects in freelist
	sizeclass   uint8    // size class
	incache     bool     // being used by an mcache
	state       uint8    // mspaninuse etc
	needzero    uint8    // needs to be zeroed before allocation
	divShift    uint8    // for divide by elemsize - divMagic.shift
	divShift2   uint8    // for divide by elemsize - divMagic.shift2
	elemsize    uintptr  // computed from sizeclass or from npages
	unusedsince int64    // first time spotted by gc in mspanfree state
	npreleased  uintptr  // number of pages released to the os
	limit       uintptr  // end of data in span
	speciallock mutex    // guards specials list
	specials    *special // linked list of special records sorted by offset.
	baseMask    uintptr  // if non-0, elemsize is a power of 2, & this will get object allocation base
}

func (s *mspan) base() uintptr {
	return uintptr(s.start << _PageShift)
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go

// h_spans is a lookup table to map virtual address page IDs to *mspan.
// For allocated spans, their pages map to the span itself.
// For free spans, only the lowest and highest pages map to the span itself. Internal
// pages map to an arbitrary span.
// For pages that have never been allocated, h_spans entries are nil.
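// For a heap address p, the corresponding entry is
// h_spans[(p-mheap_.arena_start)>>_PageShift] (see spanOfUnchecked below).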
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h_allspans) >= cap(h_allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h_allspans)*3/2 {
			n = cap(h_allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h_allspans)
		sp.cap = n
		if len(h_allspans) > 0 {
			copy(new, h_allspans)
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc.go.
			if h.allspans != mheap_.gcspans {
				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
			}
		}
		h_allspans = new
		h.allspans = (**mspan)(unsafe.Pointer(sp.array))
	}
	h_allspans = append(h_allspans, s)
	h.nspan = uint32(len(h_allspans))
}

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	k := b >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := h_spans[x]
	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return h_spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := uintptr(s.start) << _PageShift
	if s.sizeclass == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (uintptr(v) - uintptr(p)) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}

// Initialize the heap.
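// spans_size is the number of bytes reserved for the spans lookup array;
// h_spans gets one *mspan entry per heap page (spans_size/sys.PtrSize entries).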
func (h *mheap) init(spans_size uintptr) {
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.freelarge.init()
	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(int32(i))
	}

	sp := (*slice)(unsafe.Pointer(&h_spans))
	sp.array = unsafe.Pointer(h.spans)
	sp.len = int(spans_size / sys.PtrSize)
	sp.cap = int(spans_size / sys.PtrSize)
}

// mHeap_MapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, sys.PhysPageSize)
	if h.spans_mapped >= n {
		return
	}
	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
	h.spans_mapped = n
}

// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s)
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage;
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
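	// As in reclaimList above, the heap lock must not be held while sweeping:
	// a span that becomes completely free comes back through freeSpan (below),
	// which takes h.lock itself.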
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		h.reclaim(npage)
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		s.state = _MSpanInUse
		s.freelist = 0
		s.ref = 0
		s.sizeclass = uint8(sizeclass)
		if sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.free)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h_spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h_spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h_spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h_spans orders the reads.
	unlock(&h.lock)
	return s
}

func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, sizeclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

func (h *mheap) allocStack(npage uintptr) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_allocstack not on g0 stack")
	}
	lock(&h.lock)
	s := h.allocSpanLocked(npage)
	if s != nil {
		s.state = _MSpanStack
		s.freelist = 0
		s.ref = 0
		memstats.stacks_inuse += uint64(s.npages << _PageShift)
	}

	// This unlock acts as a release barrier. See mHeap_Alloc_m.
	unlock(&h.lock)
	return s
}

// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			goto HaveSpan
		}
	}

	// Best fit in list of large spans.
	list = &h.freelarge
	s = h.allocLarge(npage)
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	list.remove(s)
	if s.inList() {
		throw("still in list")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.start+pageID(npage), s.npages-npage)
		s.npages = npage
		p := uintptr(t.start)
		p -= (h.arena_start >> _PageShift)
		if p > 0 {
			h_spans[p-1] = s
		}
		h_spans[p] = t
		h_spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanStack // prevent coalescing with s
		t.state = _MSpanStack
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for n := uintptr(0); n < npage; n++ {
		h_spans[p+n] = s
	}

	memstats.heap_inuse += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}

// Allocate a span of exactly npage pages from the list of large spans.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	return bestFit(&h.freelarge, npage, nil)
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
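// For example, with free spans of 3, 5, and 5 pages and npage = 4,
// bestFit returns the 5-page span whose start address is lower.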
func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
	for s := list.first; s != nil; s = s.next {
		if s.npages < npage {
			continue
		}
		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
			best = s
		}
	}
	return best
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for i := p; i < p+s.npages; i++ {
		h_spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h_spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	p := uintptr(v) >> _PageShift
	q := p
	q -= h.arena_start >> _PageShift
	s := h_spans[q]
	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
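			// Revise the GC pacing to account for it, mirroring
			// the revise call in alloc_m above.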
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

func (h *mheap) freeStack(s *mspan) {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_freestack not on g0 stack")
	}
	s.needzero = 1
	lock(&h.lock)
	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, true, true, 0)
	unlock(&h.lock)
}

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanStack:
		if s.ref != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.ref != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := uintptr(s.start)
	p -= h.arena_start >> _PageShift
	if p > 0 {
		t := h_spans[p-1]
		if t != nil && t.state == _MSpanFree {
			s.start = t.start
			s.npages += t.npages
			s.npreleased = t.npreleased // absorb released pages
			s.needzero |= t.needzero
			p -= t.npages
			h_spans[p] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}
	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
		t := h_spans[p+s.npages]
		if t != nil && t.state == _MSpanFree {
			s.npages += t.npages
			s.npreleased += t.npreleased
			s.needzero |= t.needzero
			h_spans[p+s.npages-1] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}

	// Insert s into appropriate list.
	h.freeList(s.npages).insert(s)
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages]
	}
	return &h.freelarge
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}

func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if sys.PhysPageSize > _PageSize {
		// golang.org/issue/9993
		// If the physical page size of the machine is larger than
		// our logical heap page size the kernel may round up the
		// amount to be freed to its page size and corrupt the heap
		// pages surrounding the unused block.
		return 0
	}

	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
			released := (s.npages - s.npreleased) << _PageShift
			memstats.heap_released += uint64(released)
			sumreleased += released
			s.npreleased = s.npages
			sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		}
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengelist(&h.freelarge, now, limit)
	unlock(&h.lock)

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
		// But we can't call ReadMemStats on g0 holding locks.
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gcStart(gcForceBlockMode, false)
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(start pageID, npages uintptr) {
	span.next = nil
	span.prev = nil
	span.list = nil
	span.start = start
	span.npages = npages
	span.freelist = 0
	span.ref = 0
	span.sizeclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
}

func (span *mspan) inList() bool {
	return span.prev != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = &list.first
}

func (list *mSpanList) remove(span *mspan) {
	if span.prev == nil || span.list != list {
		println("failed MSpanList_Remove", span, span.prev, span.list, list)
		throw("MSpanList_Remove")
	}
	if span.next != nil {
		span.next.prev = span.prev
	} else {
		// TODO: After we remove the span.list != list check above,
		// we could at least still check list.last == &span.next here.
		list.last = span.prev
	}
	*span.prev = span.next
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		list.first.prev = &span.next
	} else {
		list.last = &span.next
	}
	list.first = span
	span.prev = &list.first
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.next = nil
	span.prev = list.last
	*list.last = span
	list.last = &span.next
	span.list = list
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - uintptr(span.start<<_PageShift)
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
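	// acquirem disables preemption for the duration of the sweep check
	// and the specials-list update, as in addspecial above.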
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - uintptr(span.start<<_PageShift)

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
type specialfinalizer struct {
	special special
	fn      *funcval
	nret    uintptr
	fint    *_type
	ot      *ptrtype
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
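// For a finalizer special the finalizer is queued to run later;
// for a profile special the free is recorded in the profile bucket.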
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}
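
// The mspan div* and baseMask fields are filled in by alloc_m (from
// class_to_divmagic) and consumed elsewhere in the runtime. A sketch of the
// intended computation, assuming the divMagic scheme, replaces a division by
// elemsize with shifts and a multiply:
//
//	// object index of pointer p within span s (illustrative sketch)
//	idx := (p - s.base()) >> s.divShift * uintptr(s.divMul) >> s.divShift2
//
// When baseMask is non-zero, elemsize is a power of two and the object base
// is simply s.base() + (p-s.base())&s.baseMask.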