github.com/zxy12/golang_with_comment@v0.0.0-20190701084843-0e6b2aff5ef3/runtime/mheap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Page heap.
     6  //
     7  // See malloc.go for overview.
     8  
     9  package runtime
    10  
    11  import (
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  // minPhysPageSize is a lower-bound on the physical page size. The
    18  // true physical page size may be larger than this. In contrast,
    19  // sys.PhysPageSize is an upper-bound on the physical page size.
    20  const minPhysPageSize = 4096
    21  
    22  // Main malloc heap.
    23  // The heap itself is the "free" and "freelarge" structures,
    24  // but all the other global data is here too.
    25  //
    26  // mheap must not be heap-allocated because it contains mSpanLists,
    27  // which must not be heap-allocated.
    28  //
    29  //go:notinheap
    31  type mheap struct {
    32  	lock      mutex
    33  	free      [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
    34  	freelarge mTreap                   // free treap of length >= _MaxMHeapList
    35  	busy      [_MaxMHeapList]mSpanList // busy lists of large spans of given length
    36  	busylarge mSpanList                // busy lists of large spans length >= _MaxMHeapList
    37  	sweepgen  uint32                   // sweep generation, see comment in mspan
    38  	sweepdone uint32                   // all spans are swept
    39  	sweepers  uint32                   // number of active sweepone calls
    40  
    41  	// allspans is a slice of all mspans ever created. Each mspan
    42  	// appears exactly once.
    43  	//
    44  	// The memory for allspans is manually managed and can be
    45  	// reallocated and moved as the heap grows.
    46  	//
    47  	// In general, allspans is protected by mheap_.lock, which
    48  	// prevents concurrent access as well as freeing the backing
    49  	// store. Accesses during STW might not hold the lock, but
    50  	// must ensure that allocation cannot happen around the
    51  	// access (since that may free the backing store).
    52  	allspans []*mspan // all spans out there
    53  
    54  	// spans is a lookup table to map virtual address page IDs to *mspan.
    55  	// For allocated spans, their pages map to the span itself.
    56  	// For free spans, only the lowest and highest pages map to the span itself.
    57  	// Internal pages map to an arbitrary span.
    58  	// For pages that have never been allocated, spans entries are nil.
    59  	//
    60  	// This is backed by a reserved region of the address space so
    61  	// it can grow without moving. The memory up to len(spans) is
    62  	// mapped. cap(spans) indicates the total reserved memory.
    63  	spans []*mspan
    64  
    65  	// sweepSpans contains two mspan stacks: one of swept in-use
    66  	// spans, and one of unswept in-use spans. These two trade
    67  	// roles on each GC cycle. Since the sweepgen increases by 2
    68  	// on each cycle, this means the swept spans are in
    69  	// sweepSpans[sweepgen/2%2] and the unswept spans are in
    70  	// sweepSpans[1-sweepgen/2%2]. Sweeping pops spans from the
    71  	// unswept stack and pushes spans that are still in-use on the
    72  	// swept stack. Likewise, allocating an in-use span pushes it
    73  	// on the swept stack.
    74  	sweepSpans [2]gcSweepBuf
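        	// For example (illustrative, not in the original source): when
        	// sweepgen == 4, swept spans are in sweepSpans[0] and unswept
        	// spans in sweepSpans[1]; on the next cycle (sweepgen == 6) the
        	// two stacks trade roles.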
    75  
    76  	_ uint32 // align uint64 fields on 32-bit for atomics
    77  
    78  	// Proportional sweep
    79  	//
    80  	// These parameters represent a linear function from heap_live
    81  	// to page sweep count. The proportional sweep system works to
    82  	// stay in the black by keeping the current page sweep count
    83  	// above this line at the current heap_live.
    84  	//
    85  	// The line has slope sweepPagesPerByte and passes through a
    86  	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
    87  	// any given time, the system is at (memstats.heap_live,
    88  	// pagesSwept) in this space.
    89  	//
    90  	// It's important that the line pass through a point we
    91  	// control rather than simply starting at a (0,0) origin
    92  	// because that lets us adjust sweep pacing at any time while
    93  	// accounting for current progress. If we could only adjust
    94  	// the slope, it would create a discontinuity in debt if any
    95  	// progress has already been made.
    96  	pagesInUse         uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
    97  	pagesSwept         uint64  // pages swept this cycle; updated atomically
    98  	pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
    99  	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
   100  	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
   101  	// TODO(austin): pagesInUse should be a uintptr, but the 386
   102  	// compiler can't 8-byte align fields.
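         	// For example (illustrative, with made-up numbers): if
         	// sweepPagesPerByte is 0.001 and heap_live is 1MB past
         	// sweepHeapLiveBasis, sweeping is on schedule only once
         	// pagesSwept exceeds pagesSweptBasis by roughly 1000 pages.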
   103  
   104  	// Malloc stats.
   105  	largealloc  uint64                  // bytes allocated for large objects
   106  	nlargealloc uint64                  // number of large object allocations
   107  	largefree   uint64                  // bytes freed for large objects (>maxsmallsize)
   108  	nlargefree  uint64                  // number of frees for large objects (>maxsmallsize)
   109  	nsmallfree  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
   110  
   111  	// range of addresses we might see in the heap
   112  	bitmap        uintptr // Points to one byte past the end of the bitmap
   113  	bitmap_mapped uintptr
   114  
   115  	// The arena_* fields indicate the addresses of the Go heap.
   116  	//
   117  	// The maximum range of the Go heap is
   118  	// [arena_start, arena_start+_MaxMem+1).
   119  	//
   120  	// The range of the current Go heap is
   121  	// [arena_start, arena_used). Parts of this range may not be
   122  	// mapped, but the metadata structures are always mapped for
   123  	// the full range.
   124  	arena_start uintptr
   125  	arena_used  uintptr // Set with setArenaUsed.
   126  
   127  	// The heap is grown using a linear allocator that allocates
   128  	// from the block [arena_alloc, arena_end). arena_alloc is
    129  	// often, but *not always*, equal to arena_used.
   130  	arena_alloc uintptr
   131  	arena_end   uintptr
   132  
   133  	// arena_reserved indicates that the memory [arena_alloc,
   134  	// arena_end) is reserved (e.g., mapped PROT_NONE). If this is
   135  	// false, we have to be careful not to clobber existing
   136  	// mappings here. If this is true, then we own the mapping
   137  	// here and *must* clobber it to use it.
   138  	arena_reserved bool
   139  
   140  	_ uint32 // ensure 64-bit alignment
   141  
   142  	// central free lists for small size classes.
   143  	// the padding makes sure that the MCentrals are
   144  	// spaced CacheLineSize bytes apart, so that each MCentral.lock
   145  	// gets its own cache line.
   146  	// central is indexed by spanClass.
   147  	central [numSpanClasses]struct {
   148  		mcentral mcentral
   149  		pad      [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
   150  	}
   151  
   152  	spanalloc             fixalloc // allocator for span*
   153  	cachealloc            fixalloc // allocator for mcache*
   154  	treapalloc            fixalloc // allocator for treapNodes* used by large objects
   155  	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
   156  	specialprofilealloc   fixalloc // allocator for specialprofile*
   157  	speciallock           mutex    // lock for special record allocators.
   158  }
   159  
   160  var mheap_ mheap
   161  
   162  // An MSpan is a run of pages.
   163  //
   164  // When a MSpan is in the heap free list, state == MSpanFree
   165  // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
   166  //
   167  // When a MSpan is allocated, state == MSpanInUse or MSpanManual
   168  // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
   169  
   170  // Every MSpan is in one doubly-linked list,
   171  // either one of the MHeap's free lists or one of the
   172  // MCentral's span lists.
   173  
   174  // An MSpan representing actual memory has state _MSpanInUse,
   175  // _MSpanManual, or _MSpanFree. Transitions between these states are
   176  // constrained as follows:
   177  //
   178  // * A span may transition from free to in-use or manual during any GC
   179  //   phase.
   180  //
   181  // * During sweeping (gcphase == _GCoff), a span may transition from
   182  //   in-use to free (as a result of sweeping) or manual to free (as a
   183  //   result of stacks being freed).
   184  //
   185  // * During GC (gcphase != _GCoff), a span *must not* transition from
   186  //   manual or in-use to free. Because concurrent GC may read a pointer
   187  //   and then look up its span, the span state must be monotonic.
   188  type mSpanState uint8
   189  
   190  const (
   191  	_MSpanDead   mSpanState = iota
   192  	_MSpanInUse             // allocated for garbage collected heap
   193  	_MSpanManual            // allocated for manual management (e.g., stack allocator)
   194  	_MSpanFree
   195  )
   196  
   197  // mSpanStateNames are the names of the span states, indexed by
   198  // mSpanState.
   199  var mSpanStateNames = []string{
   200  	"_MSpanDead",
   201  	"_MSpanInUse",
   202  	"_MSpanManual",
   203  	"_MSpanFree",
   204  }
   205  
   206  // mSpanList heads a linked list of spans.
   207  //
   208  //go:notinheap
   209  type mSpanList struct {
   210  	first *mspan // first span in list, or nil if none
   211  	last  *mspan // last span in list, or nil if none
   212  }
   213  
   214  //go:notinheap
   215  type mspan struct {
   216  	next *mspan     // next span in list, or nil if none
   217  	prev *mspan     // previous span in list, or nil if none
   218  	list *mSpanList // For debugging. TODO: Remove.
   219  
   220  	startAddr uintptr // address of first byte of span aka s.base()
   221  	npages    uintptr // number of pages in span
   222  
   223  	manualFreeList gclinkptr // list of free objects in _MSpanManual spans
   224  
   225  	// freeindex is the slot index between 0 and nelems at which to begin scanning
   226  	// for the next free object in this span.
   227  	// Each allocation scans allocBits starting at freeindex until it encounters a 0
   228  	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
   229  	// just past the newly discovered free object.
   230  	//
    231  	// If freeindex == nelems, this span has no free objects.
   232  	//
   233  	// allocBits is a bitmap of objects in this span.
   234  	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
   235  	// then object n is free;
    236  	// otherwise, object n is allocated. Bits starting at nelems are
   237  	// undefined and should never be referenced.
   238  	//
   239  	// Object n starts at address n*elemsize + (start << pageShift).
   240  	freeindex uintptr
   241  	// TODO: Look up nelems from sizeclass and remove this field if it
   242  	// helps performance.
    243  	nelems uintptr // number of objects in the span.
   244  
   245  	// Cache of the allocBits at freeindex. allocCache is shifted
   246  	// such that the lowest bit corresponds to the bit freeindex.
   247  	// allocCache holds the complement of allocBits, thus allowing
   248  	// ctz (count trailing zero) to use it directly.
   249  	// allocCache may contain bits beyond s.nelems; the caller must ignore
   250  	// these.
   251  	allocCache uint64
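         	// For example (illustrative, not in the original source): if the
         	// objects at freeindex and freeindex+1 are allocated and the one
         	// at freeindex+2 is free, the low bits of allocCache are ...100,
         	// so sys.Ctz64(allocCache) == 2, the offset of the next free
         	// object relative to freeindex.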
   252  
   253  	// allocBits and gcmarkBits hold pointers to a span's mark and
   254  	// allocation bits. The pointers are 8 byte aligned.
    255  	// There are four arenas where this data is held.
   256  	// free: Dirty arenas that are no longer accessed
   257  	//       and can be reused.
   258  	// next: Holds information to be used in the next GC cycle.
   259  	// current: Information being used during this GC cycle.
   260  	// previous: Information being used during the last GC cycle.
   261  	// A new GC cycle starts with the call to finishsweep_m.
   262  	// finishsweep_m moves the previous arena to the free arena,
   263  	// the current arena to the previous arena, and
   264  	// the next arena to the current arena.
   265  	// The next arena is populated as the spans request
   266  	// memory to hold gcmarkBits for the next GC cycle as well
   267  	// as allocBits for newly allocated spans.
   268  	//
   269  	// The pointer arithmetic is done "by hand" instead of using
   270  	// arrays to avoid bounds checks along critical performance
   271  	// paths.
   272  	// The sweep will free the old allocBits and set allocBits to the
    273  	// gcmarkBits. The gcmarkBits are then replaced with fresh
    274  	// zeroed memory.
   275  	allocBits  *gcBits
   276  	gcmarkBits *gcBits
   277  
   278  	// sweep generation:
   279  	// if sweepgen == h->sweepgen - 2, the span needs sweeping
   280  	// if sweepgen == h->sweepgen - 1, the span is currently being swept
   281  	// if sweepgen == h->sweepgen, the span is swept and ready to use
   282  	// h->sweepgen is incremented by 2 after every GC
   283  
   284  	sweepgen    uint32
   285  	divMul      uint16     // for divide by elemsize - divMagic.mul
   286  	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
   287  	allocCount  uint16     // number of allocated objects
   288  	spanclass   spanClass  // size class and noscan (uint8)
   289  	incache     bool       // being used by an mcache
   290  	state       mSpanState // mspaninuse etc
   291  	needzero    uint8      // needs to be zeroed before allocation
   292  	divShift    uint8      // for divide by elemsize - divMagic.shift
   293  	divShift2   uint8      // for divide by elemsize - divMagic.shift2
   294  	elemsize    uintptr    // computed from sizeclass or from npages
   295  	unusedsince int64      // first time spotted by gc in mspanfree state
   296  	npreleased  uintptr    // number of pages released to the os
   297  	limit       uintptr    // end of data in span
   298  	speciallock mutex      // guards specials list
   299  	specials    *special   // linked list of special records sorted by offset.
   300  }
   301  
   302  func (s *mspan) base() uintptr {
   303  	return s.startAddr
   304  }
   305  
   306  func (s *mspan) layout() (size, n, total uintptr) {
   307  	total = s.npages << _PageShift
   308  	size = s.elemsize
   309  	if size > 0 {
   310  		n = total / size
   311  	}
   312  	return
   313  }
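         // An illustrative example of layout (not in the original source),
         // assuming the usual 8KB runtime page (_PageShift == 13): a one-page
         // span with elemsize 1024 yields total == 8192, size == 1024, n == 8.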
   314  
   315  func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
   316  	h := (*mheap)(vh)
   317  	s := (*mspan)(p)
   318  	if len(h.allspans) >= cap(h.allspans) {
   319  		n := 64 * 1024 / sys.PtrSize
   320  		if n < cap(h.allspans)*3/2 {
   321  			n = cap(h.allspans) * 3 / 2
   322  		}
   323  		var new []*mspan
   324  		sp := (*slice)(unsafe.Pointer(&new))
   325  		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
   326  		if sp.array == nil {
   327  			throw("runtime: cannot allocate memory")
   328  		}
   329  		sp.len = len(h.allspans)
   330  		sp.cap = n
   331  		if len(h.allspans) > 0 {
   332  			copy(new, h.allspans)
   333  		}
   334  		oldAllspans := h.allspans
   335  		h.allspans = new
   336  		if len(oldAllspans) != 0 {
   337  			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
   338  		}
   339  	}
   340  	h.allspans = append(h.allspans, s)
   341  }
   342  
   343  // A spanClass represents the size class and noscan-ness of a span.
   344  //
   345  // Each size class has a noscan spanClass and a scan spanClass. The
   346  // noscan spanClass contains only noscan objects, which do not contain
   347  // pointers and thus do not need to be scanned by the garbage
   348  // collector.
   349  type spanClass uint8
   350  
   351  const (
   352  	numSpanClasses = _NumSizeClasses << 1
   353  	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
   354  )
   355  
   356  func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
   357  	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
   358  }
   359  
   360  func (sc spanClass) sizeclass() int8 {
   361  	return int8(sc >> 1)
   362  }
   363  
   364  func (sc spanClass) noscan() bool {
   365  	return sc&1 != 0
   366  }
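         // exampleSpanClassRoundTrip is an illustrative sketch, not part of the
         // original source: it demonstrates the spanClass encoding above, with
         // the size class in the high bits and the noscan flag in bit 0. The
         // size class value 5 is arbitrary.
         func exampleSpanClassRoundTrip() {
         	sc := makeSpanClass(5, true)
         	_ = sc.sizeclass()          // == 5
         	_ = sc.noscan()             // == true
         	_ = sc == spanClass(5<<1|1) // the raw encoding
         }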
   367  
   368  // inheap reports whether b is a pointer into a (potentially dead) heap object.
   369  // It returns false for pointers into _MSpanManual spans.
   370  // Non-preemptible because it is used by write barriers.
   371  //go:nowritebarrier
   372  //go:nosplit
   373  func inheap(b uintptr) bool {
   374  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   375  		return false
   376  	}
    377  	// b may not be the beginning of a block; consult the span table to find the block beginning.
   378  	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
   379  	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
   380  		return false
   381  	}
   382  	return true
   383  }
   384  
   385  // inHeapOrStack is a variant of inheap that returns true for pointers
   386  // into any allocated heap span.
   387  //
   388  //go:nowritebarrier
   389  //go:nosplit
   390  func inHeapOrStack(b uintptr) bool {
   391  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   392  		return false
   393  	}
    394  	// b may not be the beginning of a block; consult the span table to find the block beginning.
   395  	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
   396  	if s == nil || b < s.base() {
   397  		return false
   398  	}
   399  	switch s.state {
   400  	case mSpanInUse, _MSpanManual:
   401  		return b < s.limit
   402  	default:
   403  		return false
   404  	}
   405  }
   406  
   407  // TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
   408  // Use the functions instead.
   409  
   410  // spanOf returns the span of p. If p does not point into the heap or
   411  // no span contains p, spanOf returns nil.
   412  func spanOf(p uintptr) *mspan {
   413  	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
   414  		return nil
   415  	}
   416  	return spanOfUnchecked(p)
   417  }
   418  
   419  // spanOfUnchecked is equivalent to spanOf, but the caller must ensure
   420  // that p points into the heap (that is, mheap_.arena_start <= p <
   421  // mheap_.arena_used).
   422  func spanOfUnchecked(p uintptr) *mspan {
   423  	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
   424  }
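         // For example (illustrative, assuming the usual 8KB runtime page):
         // every address in [arena_start, arena_start+8192) has page index 0
         // and therefore maps to mheap_.spans[0].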
   425  
   426  func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
   427  	_g_ := getg()
   428  
   429  	_g_.m.mcache.local_nlookup++
   430  	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
   431  		// purge cache stats to prevent overflow
   432  		lock(&mheap_.lock)
   433  		purgecachedstats(_g_.m.mcache)
   434  		unlock(&mheap_.lock)
   435  	}
   436  
   437  	s := mheap_.lookupMaybe(unsafe.Pointer(v))
   438  	if sp != nil {
   439  		*sp = s
   440  	}
   441  	if s == nil {
   442  		if base != nil {
   443  			*base = 0
   444  		}
   445  		if size != nil {
   446  			*size = 0
   447  		}
   448  		return 0
   449  	}
   450  
   451  	p := s.base()
   452  	if s.spanclass.sizeclass() == 0 {
   453  		// Large object.
   454  		if base != nil {
   455  			*base = p
   456  		}
   457  		if size != nil {
   458  			*size = s.npages << _PageShift
   459  		}
   460  		return 1
   461  	}
   462  
   463  	n := s.elemsize
   464  	if base != nil {
   465  		i := (v - p) / n
   466  		*base = p + i*n
   467  	}
   468  	if size != nil {
   469  		*size = n
   470  	}
   471  
   472  	return 1
   473  }
   474  
   475  // Initialize the heap.
   476  func (h *mheap) init(spansStart, spansBytes uintptr) {
   477  	h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
   478  	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
   479  	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
   480  	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
   481  	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
   482  
   483  	// Don't zero mspan allocations. Background sweeping can
   484  	// inspect a span concurrently with allocating it, so it's
   485  	// important that the span's sweepgen survive across freeing
   486  	// and re-allocating a span to prevent background sweeping
   487  	// from improperly cas'ing it from 0.
   488  	//
   489  	// This is safe because mspan contains no heap pointers.
   490  	h.spanalloc.zero = false
   491  
   492  	// h->mapcache needs no init
   493  	// _MaxMHeapList = 128
   494  	for i := range h.free {
   495  		h.free[i].init()
   496  		h.busy[i].init()
   497  	}
   498  
   499  	h.busylarge.init()
   500  	for i := range h.central {
   501  		h.central[i].mcentral.init(spanClass(i))
   502  	}
   503  
   504  	sp := (*slice)(unsafe.Pointer(&h.spans))
   505  	sp.array = unsafe.Pointer(spansStart)
   506  	sp.len = 0
   507  	sp.cap = int(spansBytes / sys.PtrSize)
   508  
   509  	// Map metadata structures. But don't map race detector memory
   510  	// since we're not actually growing the arena here (and TSAN
   511  	// gets mad if you map 0 bytes).
   512  	h.setArenaUsed(h.arena_used, false)
   513  }
   514  
   515  // setArenaUsed extends the usable arena to address arena_used and
   516  // maps auxiliary VM regions for any newly usable arena space.
   517  //
   518  // racemap indicates that this memory should be managed by the race
   519  // detector. racemap should be true unless this is covering a VM hole.
   520  func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
   521  	// Map auxiliary structures *before* h.arena_used is updated.
   522  	// Waiting to update arena_used until after the memory has been mapped
    523  	// avoids faults when other threads try to access these regions immediately
   524  	// after observing the change to arena_used.
   525  
   526  	// Map the bitmap.
   527  	h.mapBits(arena_used)
   528  
   529  	// Map spans array.
   530  	h.mapSpans(arena_used)
   531  
   532  	// Tell the race detector about the new heap memory.
   533  	if racemap && raceenabled {
   534  		racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
   535  	}
   536  
   537  	h.arena_used = arena_used
   538  }
   539  
   540  // mapSpans makes sure that the spans are mapped
   541  // up to the new value of arena_used.
   542  //
   543  // Don't call this directly. Call mheap.setArenaUsed.
   544  func (h *mheap) mapSpans(arena_used uintptr) {
   545  	// Map spans array, PageSize at a time.
   546  	n := arena_used
   547  	n -= h.arena_start
    548  	// Each page of the arena needs one pointer-sized spans entry.
    549  	n = n / _PageSize * sys.PtrSize
    550  	n = round(n, physPageSize)            // round up to a physical page boundary
    551  	need := n / unsafe.Sizeof(h.spans[0]) // h.spans[0] is a *mspan, i.e. one pointer
   552  	have := uintptr(len(h.spans))
   554  	if have >= need {
   555  		return
   556  	}
   557  	h.spans = h.spans[:need]
    558  	// Map the newly needed part of the spans array, in whole physical pages.
   559  	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
   565  }
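         // For example (illustrative, assuming 8-byte pointers and 8KB pages):
         // a 1GB arena spans 131072 pages, so mapSpans maps
         // 131072 * 8 bytes = 1MB of spans entries, rounded up to a physical
         // page boundary.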
   566  
    567  // Sweeps spans in list until it reclaims at least npages into the heap.
   568  // Returns the actual number of pages reclaimed.
   569  func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
   570  	n := uintptr(0)
   571  	sg := mheap_.sweepgen
   572  retry:
   573  	for s := list.first; s != nil; s = s.next {
   574  		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   575  			list.remove(s)
   576  			// swept spans are at the end of the list
   577  			list.insertBack(s) // Puts it back on a busy list. s is not in the treap at this point.
   578  			unlock(&h.lock)
   579  			snpages := s.npages
   580  			if s.sweep(false) {
   581  				n += snpages
   582  			}
   583  			lock(&h.lock)
   584  			if n >= npages {
   585  				return n
   586  			}
   587  			// the span could have been moved elsewhere
   588  			goto retry
   589  		}
   590  		if s.sweepgen == sg-1 {
    591  			// the span is being swept by the background sweeper; skip it
   592  			continue
   593  		}
    594  		// already swept empty span,
    595  		// all subsequent ones must also be either swept or in the process of sweeping
   596  		break
   597  	}
   598  	return n
   599  }
   600  
   601  // Sweeps and reclaims at least npage pages into heap.
   602  // Called before allocating npage pages.
   603  func (h *mheap) reclaim(npage uintptr) {
    604  	// First try to sweep busy spans with large objects of size >= npage;
    605  	// this has a good chance of reclaiming the necessary space.
   606  	for i := int(npage); i < len(h.busy); i++ {
   607  		if h.reclaimList(&h.busy[i], npage) != 0 {
   608  			return // Bingo!
   609  		}
   610  	}
   611  
   612  	// Then -- even larger objects.
   613  	if h.reclaimList(&h.busylarge, npage) != 0 {
   614  		return // Bingo!
   615  	}
   616  
   617  	// Now try smaller objects.
   618  	// One such object is not enough, so we need to reclaim several of them.
   619  	reclaimed := uintptr(0)
   620  	for i := 0; i < int(npage) && i < len(h.busy); i++ {
   621  		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
   622  		if reclaimed >= npage {
   623  			return
   624  		}
   625  	}
   626  
   627  	// Now sweep everything that is not yet swept.
   628  	unlock(&h.lock)
   629  	for {
   630  		n := sweepone()
   631  		if n == ^uintptr(0) { // all spans are swept
   632  			break
   633  		}
   634  		reclaimed += n
   635  		if reclaimed >= npage {
   636  			break
   637  		}
   638  	}
   639  	lock(&h.lock)
   640  }
   641  
   642  // Allocate a new span of npage pages from the heap for GC'd memory
   643  // and record its size class in the HeapMap and HeapMapCache.
   644  func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
   645  	_g_ := getg()
   646  	if _g_ != _g_.m.g0 {
   647  		throw("_mheap_alloc not on g0 stack")
   648  	}
   649  	lock(&h.lock)
   650  
   651  	// To prevent excessive heap growth, before allocating n pages
   652  	// we need to sweep and reclaim at least n pages.
   653  	if h.sweepdone == 0 {
   654  		// TODO(austin): This tends to sweep a large number of
   655  		// spans in order to find a few completely free spans
   656  		// (for example, in the garbage benchmark, this sweeps
    657  		// ~30x the number of pages it's trying to allocate).
   658  		// If GC kept a bit for whether there were any marks
   659  		// in a span, we could release these free spans
   660  		// at the end of GC and eliminate this entirely.
   661  		if trace.enabled {
   662  			traceGCSweepStart()
   663  		}
   664  		h.reclaim(npage)
   665  		if trace.enabled {
   666  			traceGCSweepDone()
   667  		}
   668  	}
   669  
   670  	stat_mheap(h)
   671  
   672  	// transfer stats from cache to global
   673  	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
   674  	_g_.m.mcache.local_scan = 0
   675  	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
   676  	_g_.m.mcache.local_tinyallocs = 0
   677  
   678  	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
   679  	if s != nil {
   680  		// Record span info, because gc needs to be
   681  		// able to map interior pointer to containing span.
   682  		atomic.Store(&s.sweepgen, h.sweepgen)
   683  		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
   684  		s.state = _MSpanInUse
   685  		s.allocCount = 0
   686  		s.spanclass = spanclass
   687  		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
   688  			s.elemsize = s.npages << _PageShift
   689  			s.divShift = 0
   690  			s.divMul = 0
   691  			s.divShift2 = 0
   692  			s.baseMask = 0
   693  		} else {
   694  			s.elemsize = uintptr(class_to_size[sizeclass])
   695  			m := &class_to_divmagic[sizeclass]
   696  			s.divShift = m.shift
   697  			s.divMul = m.mul
   698  			s.divShift2 = m.shift2
   699  			s.baseMask = m.baseMask
   700  		}
   701  
   702  		// update stats, sweep lists
   703  		h.pagesInUse += uint64(npage)
   704  		if large {
   705  			memstats.heap_objects++
   706  			mheap_.largealloc += uint64(s.elemsize)
   707  			mheap_.nlargealloc++
   708  			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
   709  			// Swept spans are at the end of lists.
   710  			if s.npages < uintptr(len(h.busy)) {
   711  				h.busy[s.npages].insertBack(s)
   712  			} else {
   713  				h.busylarge.insertBack(s)
   714  			}
   715  		}
   716  	}
   717  	// heap_scan and heap_live were updated.
   718  	if gcBlackenEnabled != 0 {
   719  		gcController.revise()
   720  	}
   721  
   722  	if trace.enabled {
   723  		traceHeapAlloc()
   724  	}
   725  
   726  	// h.spans is accessed concurrently without synchronization
   727  	// from other threads. Hence, there must be a store/store
   728  	// barrier here to ensure the writes to h.spans above happen
   729  	// before the caller can publish a pointer p to an object
   730  	// allocated from s. As soon as this happens, the garbage
   731  	// collector running on another processor could read p and
   732  	// look up s in h.spans. The unlock acts as the barrier to
   733  	// order these writes. On the read side, the data dependency
   734  	// between p and the index in h.spans orders the reads.
   735  	unlock(&h.lock)
   736  
   737  	stat_mheap(h)
   738  	return s
   739  }
   740  
   741  func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
   742  	// Don't do any operations that lock the heap on the G stack.
   743  	// It might trigger stack growth, and the stack growth code needs
   744  	// to be able to allocate heap.
   745  	var s *mspan
   746  	systemstack(func() {
   747  		s = h.alloc_m(npage, spanclass, large)
   748  	})
   749  
   750  	if s != nil {
   751  		if needzero && s.needzero != 0 {
   752  			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
   753  		}
   754  		s.needzero = 0
   755  	}
   756  	return s
   757  }
   758  
   759  // allocManual allocates a manually-managed span of npage pages.
   760  // allocManual returns nil if allocation fails.
   761  //
   762  // allocManual adds the bytes used to *stat, which should be a
   763  // memstats in-use field. Unlike allocations in the GC'd heap, the
   764  // allocation does *not* count toward heap_inuse or heap_sys.
   765  //
   766  // The memory backing the returned span may not be zeroed if
   767  // span.needzero is set.
   768  //
   769  // allocManual must be called on the system stack to prevent stack
   770  // growth. Since this is used by the stack allocator, stack growth
   771  // during allocManual would self-deadlock.
   772  //
   773  //go:systemstack
   774  func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
   775  	lock(&h.lock)
   776  	s := h.allocSpanLocked(npage, stat)
   777  	if s != nil {
   778  		s.state = _MSpanManual
   779  		s.manualFreeList = 0
   780  		s.allocCount = 0
   781  		s.spanclass = 0
   782  		s.nelems = 0
   783  		s.elemsize = 0
   784  		s.limit = s.base() + s.npages<<_PageShift
    785  		// Manually managed memory doesn't count toward heap_sys.
   786  		memstats.heap_sys -= uint64(s.npages << _PageShift)
   787  	}
   788  
   789  	// This unlock acts as a release barrier. See mheap.alloc_m.
   790  	unlock(&h.lock)
   791  
   792  	return s
   793  }
   794  
   795  // Allocates a span of the given size.  h must be locked.
   796  // The returned span has been removed from the
   797  // free list, but its state is still MSpanFree.
   798  func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
   799  	var list *mSpanList
   800  	var s *mspan
   801  
   802  	// Try in fixed-size lists up to max.
   803  	for i := int(npage); i < len(h.free); i++ {
   804  		list = &h.free[i]
   805  		if !list.isEmpty() {
   806  			s = list.first
   807  			list.remove(s)
   808  			goto HaveSpan
   809  		}
   810  	}
   811  	// Best fit in list of large spans.
   812  	s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
   813  	if s == nil {
    814  		// No free span in the treap is large enough; grow the heap and retry.
   815  		if !h.grow(npage) {
   816  			return nil
   817  		}
   818  		s = h.allocLarge(npage)
   819  		if s == nil {
   820  			return nil
   821  		}
   822  	}
   823  
   824  HaveSpan:
   825  	// Mark span in use.
   826  	if s.state != _MSpanFree {
   827  		throw("MHeap_AllocLocked - MSpan not free")
   828  	}
   829  	if s.npages < npage {
   830  		throw("MHeap_AllocLocked - bad npages")
   831  	}
   832  	if s.npreleased > 0 {
   833  		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
   834  		memstats.heap_released -= uint64(s.npreleased << _PageShift)
   835  		s.npreleased = 0
   836  	}
   837  
   838  	if s.npages > npage {
   839  		// Trim extra and put it back in the heap.
   840  		t := (*mspan)(h.spanalloc.alloc())
   841  		t.init(s.base()+npage<<_PageShift, s.npages-npage)
   842  		s.npages = npage
   843  		p := (t.base() - h.arena_start) >> _PageShift
   844  		if p > 0 {
   845  			h.spans[p-1] = s
   846  		}
   847  		h.spans[p] = t
   848  		h.spans[p+t.npages-1] = t
   849  		t.needzero = s.needzero
   850  		s.state = _MSpanManual // prevent coalescing with s
   851  		t.state = _MSpanManual
   852  		h.freeSpanLocked(t, false, false, s.unusedsince)
   853  		s.state = _MSpanFree
   854  	}
   855  	s.unusedsince = 0
   856  
   857  	p := (s.base() - h.arena_start) >> _PageShift
   858  	for n := uintptr(0); n < npage; n++ {
   859  		h.spans[p+n] = s
   860  	}
   861  
   862  	*stat += uint64(npage << _PageShift)
   863  	memstats.heap_idle -= uint64(npage << _PageShift)
   864  
   866  	if s.inList() {
   867  		throw("still in list")
   868  	}
   869  	return s
   870  }
   871  
    872  // Large spans have a minimum size of 1MByte. The maximum number of large spans needed to
    873  // support 1TByte is 1 million; experimentation using random sizes indicates that the depth
    874  // of the tree is less than 2x that of a perfectly balanced tree. 1TByte can be referenced
    875  // by a perfectly balanced tree with a depth of 20, so twice that is an acceptable 40.
   876  func (h *mheap) isLargeSpan(npages uintptr) bool {
   877  	return npages >= uintptr(len(h.free))
   878  }
   879  
   880  // allocLarge allocates a span of at least npage pages from the treap of large spans.
   881  // Returns nil if no such span currently exists.
   882  func (h *mheap) allocLarge(npage uintptr) *mspan {
   883  	// Search treap for smallest span with >= npage pages.
   884  	return h.freelarge.remove(npage)
   885  }
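         // For example (illustrative): if the treap holds free spans of 200 and
         // 512 pages, allocLarge(300) removes and returns the 512-page span;
         // allocSpanLocked then trims the unused 212-page tail and frees it
         // back into the heap.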
   886  
   887  // Try to add at least npage pages of memory to the heap,
   888  // returning whether it worked.
   889  //
   890  // h must be locked.
   891  func (h *mheap) grow(npage uintptr) bool {
   892  	// Ask for a big chunk, to reduce the number of mappings
   893  	// the operating system needs to track; also amortizes
   894  	// the overhead of an operating system mapping.
   895  	// Allocate a multiple of 64kB.
   896  	npage = round(npage, (64<<10)/_PageSize)
   897  	ask := npage << _PageShift
   898  	if ask < _HeapAllocChunk {
   899  		ask = _HeapAllocChunk
   900  	}
   901  
   902  	v := h.sysAlloc(ask)
   903  	if v == nil {
   904  		if ask > npage<<_PageShift {
   905  			ask = npage << _PageShift
   906  			v = h.sysAlloc(ask)
   907  		}
   908  		if v == nil {
   909  			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
   910  			return false
   911  		}
   912  	}
   913  
   914  	// Create a fake "in use" span and free it, so that the
   915  	// right coalescing happens.
   916  	s := (*mspan)(h.spanalloc.alloc())
   917  	s.init(uintptr(v), ask>>_PageShift)
   918  	p := (s.base() - h.arena_start) >> _PageShift
   919  	for i := p; i < p+s.npages; i++ {
   920  		h.spans[i] = s
   921  	}
   922  	atomic.Store(&s.sweepgen, h.sweepgen)
   923  	s.state = _MSpanInUse
   924  	h.pagesInUse += uint64(s.npages)
   925  	h.freeSpanLocked(s, false, true, 0)
   926  	return true
   927  }
   928  
   929  // Look up the span at the given address.
   930  // Address is guaranteed to be in map
   931  // and is guaranteed to be start or end of span.
   932  func (h *mheap) lookup(v unsafe.Pointer) *mspan {
   933  	p := uintptr(v)
   934  	p -= h.arena_start
   935  	return h.spans[p>>_PageShift]
   936  }
   937  
   938  // Look up the span at the given address.
   939  // Address is *not* guaranteed to be in map
   940  // and may be anywhere in the span.
   941  // Map entries for the middle of a span are only
   942  // valid for allocated spans. Free spans may have
   943  // other garbage in their middles, so we have to
   944  // check for that.
   945  func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
   946  	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
   947  		return nil
   948  	}
   949  	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
   950  	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
   951  		return nil
   952  	}
   953  	return s
   954  }
   955  
   956  // Free the span back into the heap.
   957  func (h *mheap) freeSpan(s *mspan, acct int32) {
   958  	systemstack(func() {
   959  		mp := getg().m
   960  		lock(&h.lock)
   961  		memstats.heap_scan += uint64(mp.mcache.local_scan)
   962  		mp.mcache.local_scan = 0
   963  		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
   964  		mp.mcache.local_tinyallocs = 0
   965  		if msanenabled {
   966  			// Tell msan that this entire span is no longer in use.
   967  			base := unsafe.Pointer(s.base())
   968  			bytes := s.npages << _PageShift
   969  			msanfree(base, bytes)
   970  		}
   971  		if acct != 0 {
   972  			memstats.heap_objects--
   973  		}
   974  		if gcBlackenEnabled != 0 {
   975  			// heap_scan changed.
   976  			gcController.revise()
   977  		}
   978  		h.freeSpanLocked(s, true, true, 0)
   979  		unlock(&h.lock)
   980  	})
   981  }
   982  
   983  // freeManual frees a manually-managed span returned by allocManual.
   984  // stat must be the same as the stat passed to the allocManual that
   985  // allocated s.
   986  //
   987  // This must only be called when gcphase == _GCoff. See mSpanState for
   988  // an explanation.
   989  //
   990  // freeManual must be called on the system stack to prevent stack
   991  // growth, just like allocManual.
   992  //
   993  //go:systemstack
   994  func (h *mheap) freeManual(s *mspan, stat *uint64) {
   995  	s.needzero = 1
   996  	lock(&h.lock)
   997  	*stat -= uint64(s.npages << _PageShift)
   998  	memstats.heap_sys += uint64(s.npages << _PageShift)
   999  	h.freeSpanLocked(s, false, true, 0)
  1000  	unlock(&h.lock)
  1001  }
  1002  
  1003  // s must be on a busy list (h.busy or h.busylarge) or unlinked.
  1004  func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
  1005  	switch s.state {
  1006  	case _MSpanManual:
  1007  		if s.allocCount != 0 {
  1008  			throw("MHeap_FreeSpanLocked - invalid stack free")
  1009  		}
  1010  	case _MSpanInUse:
  1011  		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
  1012  			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
  1013  			throw("MHeap_FreeSpanLocked - invalid free")
  1014  		}
  1015  		h.pagesInUse -= uint64(s.npages)
  1016  	default:
  1017  		throw("MHeap_FreeSpanLocked - invalid span state")
  1018  	}
  1019  
  1020  	if acctinuse {
  1021  		memstats.heap_inuse -= uint64(s.npages << _PageShift)
  1022  	}
  1023  	if acctidle {
  1024  		memstats.heap_idle += uint64(s.npages << _PageShift)
  1025  	}
  1026  	s.state = _MSpanFree
  1027  	if s.inList() {
  1028  		h.busyList(s.npages).remove(s)
  1029  	}
  1030  
  1031  	// Stamp newly unused spans. The scavenger will use that
  1032  	// info to potentially give back some pages to the OS.
  1033  	s.unusedsince = unusedsince
  1034  	if unusedsince == 0 {
  1035  		s.unusedsince = nanotime()
  1036  	}
  1037  	s.npreleased = 0
  1038  
  1039  	// Coalesce with earlier, later spans.
  1040  	p := (s.base() - h.arena_start) >> _PageShift
  1041  	if p > 0 {
  1042  		before := h.spans[p-1]
  1043  		if before != nil && before.state == _MSpanFree {
  1044  			// Now adjust s.
  1045  			s.startAddr = before.startAddr
  1046  			s.npages += before.npages
  1047  			s.npreleased = before.npreleased // absorb released pages
  1048  			s.needzero |= before.needzero
  1049  			p -= before.npages
  1050  			h.spans[p] = s
  1051  			// The size is potentially changing so the treap needs to delete adjacent nodes and
  1052  			// insert back as a combined node.
  1053  			if h.isLargeSpan(before.npages) {
   1054  				// before is large, so it must be in the treap; remove it from there.
  1055  				h.freelarge.removeSpan(before)
  1056  			} else {
  1057  				h.freeList(before.npages).remove(before)
  1058  			}
  1059  			before.state = _MSpanDead
  1060  			h.spanalloc.free(unsafe.Pointer(before))
  1061  		}
  1062  	}
  1063  
  1064  	// Now check to see if next (greater addresses) span is free and can be coalesced.
  1065  	if (p + s.npages) < uintptr(len(h.spans)) {
  1066  		after := h.spans[p+s.npages]
  1067  		if after != nil && after.state == _MSpanFree {
  1068  			s.npages += after.npages
  1069  			s.npreleased += after.npreleased
  1070  			s.needzero |= after.needzero
  1071  			h.spans[p+s.npages-1] = s
  1072  			if h.isLargeSpan(after.npages) {
  1073  				h.freelarge.removeSpan(after)
  1074  			} else {
  1075  				h.freeList(after.npages).remove(after)
  1076  			}
  1077  			after.state = _MSpanDead
  1078  			h.spanalloc.free(unsafe.Pointer(after))
  1079  		}
  1080  	}
  1081  
  1082  	// Insert s into appropriate list or treap.
  1083  	if h.isLargeSpan(s.npages) {
  1084  		h.freelarge.insert(s)
  1085  	} else {
  1086  		h.freeList(s.npages).insert(s)
  1087  	}
  1088  }
  1089  
  1090  func (h *mheap) freeList(npages uintptr) *mSpanList {
  1091  	return &h.free[npages]
  1092  }
  1093  
  1094  func (h *mheap) busyList(npages uintptr) *mSpanList {
  1095  	if npages < uintptr(len(h.busy)) {
  1096  		return &h.busy[npages]
  1097  	}
  1098  	return &h.busylarge
  1099  }
  1100  
  1101  func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
  1102  	s := t.spanKey
  1103  	var sumreleased uintptr
  1104  	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
  1105  		start := s.base()
  1106  		end := start + s.npages<<_PageShift
  1107  		if physPageSize > _PageSize {
  1108  			// We can only release pages in
  1109  			// physPageSize blocks, so round start
  1110  			// and end in. (Otherwise, madvise
  1111  			// will round them *out* and release
  1112  			// more memory than we want.)
  1113  			start = (start + physPageSize - 1) &^ (physPageSize - 1)
  1114  			end &^= physPageSize - 1
  1115  			if end <= start {
  1116  				// start and end don't span a
  1117  				// whole physical page.
  1118  				return sumreleased
  1119  			}
  1120  		}
  1121  		len := end - start
  1122  		released := len - (s.npreleased << _PageShift)
  1123  		if physPageSize > _PageSize && released == 0 {
  1124  			return sumreleased
  1125  		}
  1126  		memstats.heap_released += uint64(released)
  1127  		sumreleased += released
  1128  		s.npreleased = len >> _PageShift
  1129  		sysUnused(unsafe.Pointer(start), len)
  1130  	}
  1131  	return sumreleased
  1132  }
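         // For example (illustrative, assuming _PageSize == 8KB and
         // physPageSize == 64KB): a span covering [0x10122000, 0x10180000) is
         // rounded in to [0x10130000, 0x10180000), so only the physical pages
         // wholly contained in the span are returned to the OS.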
  1133  
  1134  func scavengelist(list *mSpanList, now, limit uint64) uintptr {
  1135  	if list.isEmpty() {
  1136  		return 0
  1137  	}
  1138  
  1139  	var sumreleased uintptr
  1140  	for s := list.first; s != nil; s = s.next {
  1141  		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
  1142  			start := s.base()
  1143  			end := start + s.npages<<_PageShift
  1144  			if physPageSize > _PageSize {
  1145  				// We can only release pages in
  1146  				// physPageSize blocks, so round start
  1147  				// and end in. (Otherwise, madvise
  1148  				// will round them *out* and release
  1149  				// more memory than we want.)
  1150  				start = (start + physPageSize - 1) &^ (physPageSize - 1)
  1151  				end &^= physPageSize - 1
  1152  				if end <= start {
  1153  					// start and end don't span a
  1154  					// whole physical page.
  1155  					continue
  1156  				}
  1157  			}
  1158  			len := end - start
  1159  
  1160  			released := len - (s.npreleased << _PageShift)
  1161  			if physPageSize > _PageSize && released == 0 {
  1162  				continue
  1163  			}
  1164  			memstats.heap_released += uint64(released)
  1165  			sumreleased += released
  1166  			s.npreleased = len >> _PageShift
  1167  			sysUnused(unsafe.Pointer(start), len)
  1168  		}
  1169  	}
  1170  	return sumreleased
  1171  }
  1172  
  1173  func (h *mheap) scavenge(k int32, now, limit uint64) {
  1174  	// Disallow malloc or panic while holding the heap lock. We do
   1175  	// this here because this is a non-mallocgc entry-point to
  1176  	// the mheap API.
  1177  	gp := getg()
  1178  	gp.m.mallocing++
  1179  	lock(&h.lock)
  1180  	var sumreleased uintptr
  1181  	for i := 0; i < len(h.free); i++ {
  1182  		sumreleased += scavengelist(&h.free[i], now, limit)
  1183  	}
  1184  	sumreleased += scavengetreap(h.freelarge.treap, now, limit)
  1185  	unlock(&h.lock)
  1186  	gp.m.mallocing--
  1187  
  1188  	if debug.gctrace > 0 {
  1189  		if sumreleased > 0 {
  1190  			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
  1191  		}
  1192  		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
  1193  	}
  1194  }
  1195  
  1196  //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
  1197  func runtime_debug_freeOSMemory() {
  1198  	GC()
  1199  	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
  1200  }
  1201  
  1202  // Initialize a new span with the given start and npages.
  1203  func (span *mspan) init(base uintptr, npages uintptr) {
  1204  	// span is *not* zeroed.
  1205  	span.next = nil
  1206  	span.prev = nil
  1207  	span.list = nil
  1208  	span.startAddr = base
  1209  	span.npages = npages
  1210  	span.allocCount = 0
  1211  	span.spanclass = 0
  1212  	span.incache = false
  1213  	span.elemsize = 0
  1214  	span.state = _MSpanDead
  1215  	span.unusedsince = 0
  1216  	span.npreleased = 0
  1217  	span.speciallock.key = 0
  1218  	span.specials = nil
  1219  	span.needzero = 0
  1220  	span.freeindex = 0
  1221  	span.allocBits = nil
  1222  	span.gcmarkBits = nil
  1223  }
  1224  
  1225  func (span *mspan) inList() bool {
  1226  	return span.list != nil
  1227  }
  1228  
  1229  // Initialize an empty doubly-linked list.
  1230  func (list *mSpanList) init() {
  1231  	list.first = nil
  1232  	list.last = nil
  1233  }
  1234  
  1235  func (list *mSpanList) remove(span *mspan) {
  1236  	if span.list != list {
  1237  		print("runtime: failed MSpanList_Remove span.npages=", span.npages,
  1238  			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
  1239  		throw("MSpanList_Remove")
  1240  	}
  1241  	if list.first == span {
  1242  		list.first = span.next
  1243  	} else {
  1244  		span.prev.next = span.next
  1245  	}
  1246  	if list.last == span {
  1247  		list.last = span.prev
  1248  	} else {
  1249  		span.next.prev = span.prev
  1250  	}
  1251  	span.next = nil
  1252  	span.prev = nil
  1253  	span.list = nil
  1254  }
  1255  
  1256  func (list *mSpanList) isEmpty() bool {
  1257  	return list.first == nil
  1258  }
  1259  
  1260  func (list *mSpanList) insert(span *mspan) {
  1261  	if span.next != nil || span.prev != nil || span.list != nil {
  1262  		//println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
  1263  		throw("MSpanList_Insert")
  1264  	}
  1265  	span.next = list.first
  1266  	if list.first != nil {
  1267  		// The list contains at least one span; link it in.
  1268  		// The last span in the list doesn't change.
  1269  		list.first.prev = span
  1270  	} else {
  1271  		// The list contains no spans, so this is also the last span.
  1272  		list.last = span
  1273  	}
  1274  	list.first = span
  1275  	span.list = list
  1276  }
  1277  
  1278  func (list *mSpanList) insertBack(span *mspan) {
  1279  	if span.next != nil || span.prev != nil || span.list != nil {
  1280  		//println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
  1281  		throw("MSpanList_InsertBack")
  1282  	}
  1283  	span.prev = list.last
  1284  	if list.last != nil {
  1285  		// The list contains at least one span.
  1286  		list.last.next = span
  1287  	} else {
  1288  		// The list contains no spans, so this is also the first span.
  1289  		list.first = span
  1290  	}
  1291  	list.last = span
  1292  	span.list = list
  1293  }
  1294  
  1295  // takeAll removes all spans from other and inserts them at the front
  1296  // of list.
  1297  func (list *mSpanList) takeAll(other *mSpanList) {
  1298  	if other.isEmpty() {
  1299  		return
  1300  	}
  1301  
  1302  	// Reparent everything in other to list.
  1303  	for s := other.first; s != nil; s = s.next {
  1304  		s.list = list
  1305  	}
  1306  
  1307  	// Concatenate the lists.
  1308  	if list.isEmpty() {
  1309  		*list = *other
  1310  	} else {
  1311  		// Neither list is empty. Put other before list.
  1312  		other.last.next = list.first
  1313  		list.first.prev = other.last
  1314  		list.first = other.first
  1315  	}
  1316  
  1317  	other.first, other.last = nil, nil
  1318  }
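         // exampleSpanListUsage is an illustrative sketch, not part of the
         // original source, walking through the mSpanList operations defined
         // above. The spans are assumed to start out unlinked.
         func exampleSpanListUsage(a, b *mSpanList, s *mspan) {
         	a.init()        // empty list: first == last == nil
         	a.insert(s)     // push front: s is now both a.first and a.last
         	a.remove(s)     // unlink: s.next, s.prev, and s.list become nil
         	a.insertBack(s) // push back: equivalent here because a was empty
         	b.init()
         	b.takeAll(a)    // b now owns s; a is left empty
         }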
  1319  
  1320  const (
  1321  	_KindSpecialFinalizer = 1
  1322  	_KindSpecialProfile   = 2
  1323  	// Note: The finalizer special must be first because if we're freeing
  1324  	// an object, a finalizer special will cause the freeing operation
  1325  	// to abort, and we want to keep the other special records around
  1326  	// if that happens.
  1327  )
  1328  
  1329  //go:notinheap
  1330  type special struct {
  1331  	next   *special // linked list in span
  1332  	offset uint16   // span offset of object
  1333  	kind   byte     // kind of special
  1334  }
  1335  
  1336  // Adds the special record s to the list of special records for
  1337  // the object p. All fields of s should be filled in except for
  1338  // offset & next, which this routine will fill in.
  1339  // Returns true if the special was successfully added, false otherwise.
  1340  // (The add will fail only if a record with the same p and s->kind
  1341  //  already exists.)
  1342  func addspecial(p unsafe.Pointer, s *special) bool {
  1343  	span := mheap_.lookupMaybe(p)
  1344  	if span == nil {
  1345  		throw("addspecial on invalid pointer")
  1346  	}
  1347  
  1348  	// Ensure that the span is swept.
  1349  	// Sweeping accesses the specials list w/o locks, so we have
  1350  	// to synchronize with it. And it's just much safer.
  1351  	mp := acquirem()
  1352  	span.ensureSwept()
  1353  
  1354  	offset := uintptr(p) - span.base()
  1355  	kind := s.kind
  1356  
  1357  	lock(&span.speciallock)
  1358  
  1359  	// Find splice point, check for existing record.
  1360  	t := &span.specials
  1361  	for {
  1362  		x := *t
  1363  		if x == nil {
  1364  			break
  1365  		}
  1366  		if offset == uintptr(x.offset) && kind == x.kind {
  1367  			unlock(&span.speciallock)
  1368  			releasem(mp)
  1369  			return false // already exists
  1370  		}
  1371  		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
  1372  			break
  1373  		}
  1374  		t = &x.next
  1375  	}
  1376  
  1377  	// Splice in record, fill in offset.
  1378  	s.offset = uint16(offset)
  1379  	s.next = *t
  1380  	*t = s
  1381  	unlock(&span.speciallock)
  1382  	releasem(mp)
  1383  
  1384  	return true
  1385  }
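         // For example (illustrative): a finalizer special
         // (_KindSpecialFinalizer == 1) and a profile special
         // (_KindSpecialProfile == 2) attached to the same object share an
         // offset, so the sort order above places the finalizer first.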
  1386  
  1387  // Removes the Special record of the given kind for the object p.
  1388  // Returns the record if the record existed, nil otherwise.
  1389  // The caller must FixAlloc_Free the result.
  1390  func removespecial(p unsafe.Pointer, kind uint8) *special {
  1391  	span := mheap_.lookupMaybe(p)
  1392  	if span == nil {
  1393  		throw("removespecial on invalid pointer")
  1394  	}
  1395  
  1396  	// Ensure that the span is swept.
  1397  	// Sweeping accesses the specials list w/o locks, so we have
  1398  	// to synchronize with it. And it's just much safer.
  1399  	mp := acquirem()
  1400  	span.ensureSwept()
  1401  
  1402  	offset := uintptr(p) - span.base()
  1403  
  1404  	lock(&span.speciallock)
  1405  	t := &span.specials
  1406  	for {
  1407  		s := *t
  1408  		if s == nil {
  1409  			break
  1410  		}
  1411  		// This function is used for finalizers only, so we don't check for
  1412  		// "interior" specials (p must be exactly equal to s->offset).
  1413  		if offset == uintptr(s.offset) && kind == s.kind {
  1414  			*t = s.next
  1415  			unlock(&span.speciallock)
  1416  			releasem(mp)
  1417  			return s
  1418  		}
  1419  		t = &s.next
  1420  	}
  1421  	unlock(&span.speciallock)
  1422  	releasem(mp)
  1423  	return nil
  1424  }
  1425  
  1426  // The described object has a finalizer set for it.
  1427  //
  1428  // specialfinalizer is allocated from non-GC'd memory, so any heap
  1429  // pointers must be specially handled.
  1430  //
  1431  //go:notinheap
  1432  type specialfinalizer struct {
  1433  	special special
  1434  	fn      *funcval // May be a heap pointer.
  1435  	nret    uintptr
  1436  	fint    *_type   // May be a heap pointer, but always live.
  1437  	ot      *ptrtype // May be a heap pointer, but always live.
  1438  }
  1439  
  1440  // Adds a finalizer to the object p. Returns true if it succeeded.
  1441  func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
  1442  	lock(&mheap_.speciallock)
  1443  	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
  1444  	unlock(&mheap_.speciallock)
  1445  	s.special.kind = _KindSpecialFinalizer
  1446  	s.fn = f
  1447  	s.nret = nret
  1448  	s.fint = fint
  1449  	s.ot = ot
  1450  	if addspecial(p, &s.special) {
  1451  		// This is responsible for maintaining the same
  1452  		// GC-related invariants as markrootSpans in any
  1453  		// situation where it's possible that markrootSpans
  1454  		// has already run but mark termination hasn't yet.
  1455  		if gcphase != _GCoff {
  1456  			_, base, _ := findObject(p)
  1457  			mp := acquirem()
  1458  			gcw := &mp.p.ptr().gcw
  1459  			// Mark everything reachable from the object
  1460  			// so it's retained for the finalizer.
  1461  			scanobject(uintptr(base), gcw)
  1462  			// Mark the finalizer itself, since the
  1463  			// special isn't part of the GC'd heap.
  1464  			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
  1465  			if gcBlackenPromptly {
  1466  				gcw.dispose()
  1467  			}
  1468  			releasem(mp)
  1469  		}
  1470  		return true
  1471  	}
  1472  
  1473  	// There was an old finalizer, so addspecial failed; free the new record.
  1474  	lock(&mheap_.speciallock)
  1475  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  1476  	unlock(&mheap_.speciallock)
  1477  	return false
  1478  }
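
// addfinalizer is the low-level hook behind the public
// runtime.SetFinalizer API, which ends up here after validating its
// arguments. A minimal user-level usage sketch, illustrative only
// (wrapFD and fd are hypothetical):
//
//	type wrapFD struct{ fd int }
//
//	w := &wrapFD{fd: fd}
//	runtime.SetFinalizer(w, func(w *wrapFD) {
//		syscall.Close(w.fd) // reclaim the OS resource when w is dropped
//	})
//
// Passing nil as the finalizer, SetFinalizer(w, nil), clears it again
// and reaches removefinalizer below.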
  1479  
  1480  // Removes the finalizer (if any) from the object p.
  1481  func removefinalizer(p unsafe.Pointer) {
  1482  	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
  1483  	if s == nil {
  1484  		return // there wasn't a finalizer to remove
  1485  	}
  1486  	lock(&mheap_.speciallock)
  1487  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  1488  	unlock(&mheap_.speciallock)
  1489  }
  1490  
  1491  // The described object is being heap profiled.
  1492  //
  1493  //go:notinheap
  1494  type specialprofile struct {
  1495  	special special
  1496  	b       *bucket
  1497  }
  1498  
  1499  // Set the heap profile bucket associated with p to b.
  1500  func setprofilebucket(p unsafe.Pointer, b *bucket) {
  1501  	lock(&mheap_.speciallock)
  1502  	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
  1503  	unlock(&mheap_.speciallock)
  1504  	s.special.kind = _KindSpecialProfile
  1505  	s.b = b
  1506  	if !addspecial(p, &s.special) {
  1507  		throw("setprofilebucket: profile already set")
  1508  	}
  1509  }
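
// setprofilebucket is reached when the memory profiler samples an
// allocation; the sampling rate is the public runtime.MemProfileRate.
// A user-level sketch of enabling the profile and dumping it
// (illustrative only; error handling elided):
//
//	runtime.MemProfileRate = 1 // sample every allocation (expensive)
//	// ... run the workload ...
//	f, _ := os.Create("heap.pprof")
//	pprof.Lookup("heap").WriteTo(f, 0)
//	f.Close()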
  1510  
  1511  // Do whatever cleanup needs to be done to deallocate s. It has
  1512  // already been unlinked from the mspan specials list.
  1513  func freespecial(s *special, p unsafe.Pointer, size uintptr) {
  1514  	switch s.kind {
  1515  	case _KindSpecialFinalizer:
  1516  		sf := (*specialfinalizer)(unsafe.Pointer(s))
  1517  		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
  1518  		lock(&mheap_.speciallock)
  1519  		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
  1520  		unlock(&mheap_.speciallock)
  1521  	case _KindSpecialProfile:
  1522  		sp := (*specialprofile)(unsafe.Pointer(s))
  1523  		mProf_Free(sp.b, size)
  1524  		lock(&mheap_.speciallock)
  1525  		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
  1526  		unlock(&mheap_.speciallock)
  1527  	default:
  1528  		throw("bad special kind")
  1529  		panic("not reached")
  1530  	}
  1531  }
  1532  
  1533  // gcBits is an alloc/mark bitmap. This is always used as *gcBits.
  1534  //
  1535  //go:notinheap
  1536  type gcBits uint8
  1537  
  1538  // bytep returns a pointer to the n'th byte of b.
  1539  func (b *gcBits) bytep(n uintptr) *uint8 {
  1540  	return addb((*uint8)(b), n)
  1541  }
  1542  
  1543  // bitp returns a pointer to the byte containing bit n and a mask for
  1544  // selecting that bit from *bytep.
  1545  func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
  1546  	return b.bytep(n / 8), 1 << (n % 8)
  1547  }
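
// A typical use of bitp is to fetch the byte pointer and mask once and
// then test or set the bit through them. A hedged sketch (markN is
// illustrative, not a runtime function):
//
//	// markN sets bit n of b, e.g. to mark the n'th object in a span.
//	func markN(b *gcBits, n uintptr) {
//		bytep, mask := b.bitp(n)
//		*bytep |= mask // runtime code uses atomic.Or8 where this races with the GC
//	}
//
// Testing a bit is the mirror image: *bytep&mask != 0.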
  1548  
  1549  const gcBitsChunkBytes = uintptr(64 << 10)
  1550  const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
  1551  
  1552  type gcBitsHeader struct {
  1553  	free uintptr // free is the index into bits of the next free byte.
  1554  	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
  1555  }
  1556  
  1557  //go:notinheap
  1558  type gcBitsArena struct {
  1559  	// gcBitsHeader // side-step the recursive type bug (issue 14620) by including the fields by hand.
  1560  	free uintptr // free is the index into bits of the next free byte; read/write atomically
  1561  	next *gcBitsArena
  1562  	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
  1563  }
  1564  
  1565  var gcBitsArenas struct {
  1566  	lock     mutex
  1567  	free     *gcBitsArena
  1568  	next     *gcBitsArena // Read atomically. Write atomically under lock.
  1569  	current  *gcBitsArena
  1570  	previous *gcBitsArena
  1571  }
  1572  
  1573  // tryAlloc allocates from b or returns nil if b does not have enough room.
  1574  // This is safe to call concurrently.
  1575  func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
  1576  	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
  1577  		return nil
  1578  	}
  1579  	// Try to allocate from this block.
  1580  	end := atomic.Xadduintptr(&b.free, bytes)
  1581  	if end > uintptr(len(b.bits)) {
  1582  		return nil
  1583  	}
  1584  	// There was enough room.
  1585  	start := end - bytes
  1586  	return &b.bits[start]
  1587  }
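
// tryAlloc is an optimistic, lock-free bump allocator: the Xadd may
// push free past len(b.bits), but an overshoot merely wastes the bytes
// the losing caller reserved; free never moves backward, so no two
// callers can ever receive overlapping ranges. The same pattern in
// isolation, using ordinary sync/atomic (bumpArena is illustrative):
//
//	type bumpArena struct {
//		free uint64 // next free index, advanced atomically
//		buf  [1 << 16]byte
//	}
//
//	func (a *bumpArena) tryAlloc(n uint64) []byte {
//		end := atomic.AddUint64(&a.free, n)
//		if end > uint64(len(a.buf)) {
//			return nil // overshot; the reserved bytes stay unused
//		}
//		return a.buf[end-n : end]
//	}
//
// The Loaduintptr check at the top of the real tryAlloc is a fast path
// that skips the Xadd (and the pointless overshoot) when the arena is
// obviously full.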
  1588  
  1589  // newMarkBits returns a pointer to 8-byte-aligned bytes
  1590  // to be used for a span's mark bits.
  1591  func newMarkBits(nelems uintptr) *gcBits {
  1592  	blocksNeeded := (nelems + 63) / 64
  1593  	bytesNeeded := blocksNeeded * 8
  1594  
  1595  	// Try directly allocating from the current head arena.
  1596  	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
  1597  	if p := head.tryAlloc(bytesNeeded); p != nil {
  1598  		return p
  1599  	}
  1600  
  1601  	// There's not enough room in the head arena. We may need to
  1602  	// allocate a new arena.
  1603  	lock(&gcBitsArenas.lock)
  1604  	// Try the head arena again, since it may have changed. Now
  1605  	// that we hold the lock, the list head can't change, but its
  1606  	// free position still can.
  1607  	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
  1608  		unlock(&gcBitsArenas.lock)
  1609  		return p
  1610  	}
  1611  
  1612  	// Allocate a new arena. This may temporarily drop the lock.
  1613  	fresh := newArenaMayUnlock()
  1614  	// If newArenaMayUnlock dropped the lock, another thread may
  1615  	// have put a fresh arena on the "next" list. Try allocating
  1616  	// from next again.
  1617  	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
  1618  		// Put fresh back on the free list.
  1619  		// TODO: Mark it "already zeroed"
  1620  		fresh.next = gcBitsArenas.free
  1621  		gcBitsArenas.free = fresh
  1622  		unlock(&gcBitsArenas.lock)
  1623  		return p
  1624  	}
  1625  
  1626  	// Allocate from the fresh arena. We haven't linked it in yet, so
  1627  	// this cannot race and is guaranteed to succeed.
  1628  	p := fresh.tryAlloc(bytesNeeded)
  1629  	if p == nil {
  1630  		throw("markBits overflow")
  1631  	}
  1632  
  1633  	// Add the fresh arena to the "next" list.
  1634  	fresh.next = gcBitsArenas.next
  1635  	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
  1636  
  1637  	unlock(&gcBitsArenas.lock)
  1638  	return p
  1639  }
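
// The sizing arithmetic rounds the bitmap up to whole 64-bit words, so
// the result is always a multiple of 8 bytes and the bits can be
// scanned a word at a time. For example, a span with nelems = 100
// objects needs blocksNeeded = (100+63)/64 = 2 words, hence
// bytesNeeded = 16 bytes, even though 100 bits would fit in 13 bytes.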
  1640  
  1641  // newAllocBits returns a pointer to 8-byte-aligned bytes
  1642  // to be used for this span's alloc bits.
  1643  // newAllocBits is used to provide newly initialized spans
  1644  // with allocation bits. For spans that are not being
  1645  // initialized, the mark bits are repurposed as allocation
  1646  // bits when the span is swept.
  1647  func newAllocBits(nelems uintptr) *gcBits {
  1648  	return newMarkBits(nelems)
  1649  }
  1650  
  1651  // nextMarkBitArenaEpoch establishes a new epoch for the arenas
  1652  // holding the mark bits. The arenas are named relative to the
  1653  // current GC cycle which is demarcated by the call to finishweep_m.
  1654  //
  1655  // At this point all current spans have been swept.
  1656  // During that sweep each span allocated room for its gcmarkBits in
  1657  // the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
  1658  // where the GC will mark objects; after each span is swept, those mark
  1659  // bits are repurposed as the span's allocation bits.
  1660  // gcBitsArenas.current becomes gcBitsArenas.previous, where each span's
  1661  // gcAllocBits live until all spans have been swept during this GC cycle.
  1662  // Each span's sweep extinguishes its reference to gcBitsArenas.previous
  1663  // by pointing its gcAllocBits into gcBitsArenas.current.
  1664  // Finally, gcBitsArenas.previous is released onto the gcBitsArenas.free list.
  1665  func nextMarkBitArenaEpoch() {
  1666  	lock(&gcBitsArenas.lock)
  1667  	if gcBitsArenas.previous != nil {
  1668  		if gcBitsArenas.free == nil {
  1669  			gcBitsArenas.free = gcBitsArenas.previous
  1670  		} else {
  1671  			// Find end of previous arenas.
  1672  			last := gcBitsArenas.previous
  1673  			for ; last.next != nil; last = last.next {
  1674  			}
  1675  			last.next = gcBitsArenas.free
  1676  			gcBitsArenas.free = gcBitsArenas.previous
  1677  		}
  1678  	}
  1679  	gcBitsArenas.previous = gcBitsArenas.current
  1680  	gcBitsArenas.current = gcBitsArenas.next
  1681  	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
  1682  	unlock(&gcBitsArenas.lock)
  1683  }
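
// The epoch switch is a three-way rotation: previous drains to the
// free list, current becomes previous, next becomes current, and next
// starts over empty. Modeling the same shuffle with slices instead of
// linked lists (arenasState and rotate are illustrative only):
//
//	type arenasState struct {
//		free, next, current, previous []int
//	}
//
//	func rotate(a *arenasState) {
//		a.free = append(a.previous, a.free...) // previous drains to free
//		a.previous = a.current                 // bits sweeping still reads
//		a.current = a.next                     // bits the GC will mark into
//		a.next = nil                           // refilled lazily by newMarkBits
//	}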
  1684  
  1685  // newArenaMayUnlock allocates and zeroes a gcBits arena.
  1686  // The caller must hold gcBitsArenas.lock. This may temporarily release it.
  1687  func newArenaMayUnlock() *gcBitsArena {
  1688  	var result *gcBitsArena
  1689  	if gcBitsArenas.free == nil {
  1690  		unlock(&gcBitsArenas.lock)
  1691  		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
  1692  		if result == nil {
  1693  			throw("runtime: cannot allocate memory")
  1694  		}
  1695  		lock(&gcBitsArenas.lock)
  1696  	} else {
  1697  		result = gcBitsArenas.free
  1698  		gcBitsArenas.free = gcBitsArenas.free.next
  1699  		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
  1700  	}
  1701  	result.next = nil
  1702  	// If result.bits is not 8-byte aligned, adjust result.free so
  1703  	// that &result.bits[result.free] is 8-byte aligned.
  1704  	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
  1705  		result.free = 0
  1706  	} else {
  1707  		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
  1708  	}
  1709  	return result
  1710  }
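
// The alignment fix-up above is an instance of the standard round-up
// trick: for a power-of-two alignment a, the padding needed at address
// p is (a - p%a) % a, which is what the else branch computes for a = 8
// given that p&7 != 0 there. A one-line helper expressing the general
// form (alignUp is illustrative, though the runtime has equivalents):
//
//	// alignUp rounds n up to the next multiple of align, which must be
//	// a power of two.
//	func alignUp(n, align uintptr) uintptr {
//		return (n + align - 1) &^ (align - 1)
//	}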