github.com/rakyll/go@v0.0.0-20170216000551-64c02460d703/src/runtime/mheap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Page heap.
     6  //
     7  // See malloc.go for overview.
     8  
     9  package runtime
    10  
    11  import (
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  // minPhysPageSize is a lower-bound on the physical page size. The
    18  // true physical page size may be larger than this. In contrast,
    19  // sys.PhysPageSize is an upper-bound on the physical page size.
    20  const minPhysPageSize = 4096
    21  
    22  // Main malloc heap.
    23  // The heap itself is the "free" and "freelarge" lists,
    24  // but all the other global data is here too.
    25  //
    26  // mheap must not be heap-allocated because it contains mSpanLists,
    27  // which must not be heap-allocated.
    28  //
    29  //go:notinheap
    30  type mheap struct {
    31  	lock      mutex
    32  	free      [_MaxMHeapList]mSpanList // free lists of given length
    33  	freelarge mSpanList                // free lists length >= _MaxMHeapList
    34  	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
    35  	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
    36  	sweepgen  uint32                   // sweep generation, see comment in mspan
    37  	sweepdone uint32                   // all spans are swept
    38  
    39  	// allspans is a slice of all mspans ever created. Each mspan
    40  	// appears exactly once.
    41  	//
    42  	// The memory for allspans is manually managed and can be
    43  	// reallocated and moved as the heap grows.
    44  	//
    45  	// In general, allspans is protected by mheap_.lock, which
    46  	// prevents concurrent access as well as freeing the backing
    47  	// store. Accesses during STW might not hold the lock, but
    48  	// must ensure that allocation cannot happen around the
    49  	// access (since that may free the backing store).
    50  	allspans []*mspan // all spans out there
    51  
    52  	// spans is a lookup table to map virtual address page IDs to *mspan.
    53  	// For allocated spans, their pages map to the span itself.
    54  	// For free spans, only the lowest and highest pages map to the span itself.
    55  	// Internal pages map to an arbitrary span.
    56  	// For pages that have never been allocated, spans entries are nil.
    57  	//
    58  	// This is backed by a reserved region of the address space so
    59  	// it can grow without moving. The memory up to len(spans) is
    60  	// mapped. cap(spans) indicates the total reserved memory.
    61  	spans []*mspan
    62  
    63  	// sweepSpans contains two mspan stacks: one of swept in-use
    64  	// spans, and one of unswept in-use spans. These two trade
    65  	// roles on each GC cycle. Since the sweepgen increases by 2
    66  	// on each cycle, this means the swept spans are in
    67  	// sweepSpans[sweepgen/2%2] and the unswept spans are in
    68  	// sweepSpans[1-sweepgen/2%2]. Sweeping pops spans from the
    69  	// unswept stack and pushes spans that are still in-use on the
    70  	// swept stack. Likewise, allocating an in-use span pushes it
    71  	// on the swept stack.
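	//
	// For example, when sweepgen == 6 the swept spans are in
	// sweepSpans[6/2%2] == sweepSpans[1] and the unswept spans are in
	// sweepSpans[0]; on the next cycle (sweepgen == 8) the two stacks
	// trade roles.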
    72  	sweepSpans [2]gcSweepBuf
    73  
    74  	_ uint32 // align uint64 fields on 32-bit for atomics
    75  
    76  	// Proportional sweep
    77  	pagesInUse        uint64  // pages of spans in state _MSpanInUse; R/W with mheap.lock
    78  	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
    79  	pagesSwept        uint64  // pages swept this cycle; updated atomically
    80  	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
    81  	// TODO(austin): pagesInUse should be a uintptr, but the 386
    82  	// compiler can't 8-byte align fields.
    83  
    84  	// Malloc stats.
    85  	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
    86  	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
    87  	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
    88  
    89  	// range of addresses we might see in the heap
    90  	bitmap         uintptr // Points to one byte past the end of the bitmap
    91  	bitmap_mapped  uintptr
    92  	arena_start    uintptr
    93  	arena_used     uintptr // always call mapBits and mapSpans before updating
    94  	arena_end      uintptr
    95  	arena_reserved bool
    96  
    97  	// central free lists for small size classes.
    98  	// the padding makes sure that the MCentrals are
    99  	// spaced CacheLineSize bytes apart, so that each MCentral.lock
   100  	// gets its own cache line.
   101  	central [_NumSizeClasses]struct {
   102  		mcentral mcentral
   103  		pad      [sys.CacheLineSize]byte
   104  	}
   105  
   106  	spanalloc             fixalloc // allocator for span*
   107  	cachealloc            fixalloc // allocator for mcache*
   108  	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
   109  	specialprofilealloc   fixalloc // allocator for specialprofile*
   110  	speciallock           mutex    // lock for special record allocators.
   111  }
   112  
   113  var mheap_ mheap
   114  
   115  // An MSpan is a run of pages.
   116  //
   117  // When an MSpan is in the heap free list, state == MSpanFree
   118  // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
   119  //
   120  // When an MSpan is allocated, state == MSpanInUse or MSpanStack
   121  // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
   122  
   123  // Every MSpan is in one doubly-linked list,
   124  // either one of the MHeap's free lists or one of the
   125  // MCentral's span lists.
   126  
   127  // An MSpan representing actual memory has state _MSpanInUse,
   128  // _MSpanStack, or _MSpanFree. Transitions between these states are
   129  // constrained as follows:
   130  //
   131  // * A span may transition from free to in-use or stack during any GC
   132  //   phase.
   133  //
   134  // * During sweeping (gcphase == _GCoff), a span may transition from
   135  //   in-use to free (as a result of sweeping) or stack to free (as a
   136  //   result of stacks being freed).
   137  //
   138  // * During GC (gcphase != _GCoff), a span *must not* transition from
   139  //   stack or in-use to free. Because concurrent GC may read a pointer
   140  //   and then look up its span, the span state must be monotonic.
   141  type mSpanState uint8
   142  
   143  const (
   144  	_MSpanDead  mSpanState = iota
   145  	_MSpanInUse            // allocated for garbage collected heap
   146  	_MSpanStack            // allocated for use by stack allocator
   147  	_MSpanFree
   148  )
   149  
   150  // mSpanStateNames are the names of the span states, indexed by
   151  // mSpanState.
   152  var mSpanStateNames = []string{
   153  	"_MSpanDead",
   154  	"_MSpanInUse",
   155  	"_MSpanStack",
   156  	"_MSpanFree",
   157  }
   158  
   159  // mSpanList heads a linked list of spans.
   160  //
   161  //go:notinheap
   162  type mSpanList struct {
   163  	first *mspan // first span in list, or nil if none
   164  	last  *mspan // last span in list, or nil if none
   165  }
   166  
   167  //go:notinheap
   168  type mspan struct {
   169  	next *mspan     // next span in list, or nil if none
   170  	prev *mspan     // previous span in list, or nil if none
   171  	list *mSpanList // For debugging. TODO: Remove.
   172  
   173  	startAddr     uintptr   // address of first byte of span aka s.base()
   174  	npages        uintptr   // number of pages in span
   175  	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
   176  
   177  	// freeindex is the slot index between 0 and nelems at which to begin scanning
   178  	// for the next free object in this span.
   179  	// Each allocation scans allocBits starting at freeindex until it encounters a 0
   180  	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
   181  	// just past the newly discovered free object.
   182  	//
   183  	// If freeindex == nelems, this span has no free objects.
   184  	//
   185  	// allocBits is a bitmap of objects in this span.
   186  	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
   187  	// then object n is free;
   188  	// otherwise, object n is allocated. Bits starting at nelems are
   189  	// undefined and should never be referenced.
   190  	//
   191  	// Object n starts at address n*elemsize + (start << pageShift).
   192  	freeindex uintptr
   193  	// TODO: Look up nelems from sizeclass and remove this field if it
   194  	// helps performance.
   195  	nelems uintptr // number of objects in the span.
   196  
   197  	// Cache of the allocBits at freeindex. allocCache is shifted
   198  	// such that the lowest bit corresponds to the bit freeindex.
   199  	// allocCache holds the complement of allocBits, thus allowing
   200  	// ctz (count trailing zero) to use it directly.
   201  	// allocCache may contain bits beyond s.nelems; the caller must ignore
   202  	// these.
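	//
	// For example, if the shifted allocCache ends in ...0b0100, then
	// objects freeindex and freeindex+1 are allocated, ctz yields 2,
	// and freeindex+2 is the next free object to consider.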
   203  	allocCache uint64
   204  
   205  	// allocBits and gcmarkBits hold pointers to a span's mark and
   206  	// allocation bits. The pointers are 8 byte aligned.
   207  	// There are four arenas where this data is held.
   208  	// free: Dirty arenas that are no longer accessed
   209  	//       and can be reused.
   210  	// next: Holds information to be used in the next GC cycle.
   211  	// current: Information being used during this GC cycle.
   212  	// previous: Information being used during the last GC cycle.
   213  	// A new GC cycle starts with the call to finishsweep_m.
   214  	// finishsweep_m moves the previous arena to the free arena,
   215  	// the current arena to the previous arena, and
   216  	// the next arena to the current arena.
   217  	// The next arena is populated as the spans request
   218  	// memory to hold gcmarkBits for the next GC cycle as well
   219  	// as allocBits for newly allocated spans.
   220  	//
   221  	// The pointer arithmetic is done "by hand" instead of using
   222  	// arrays to avoid bounds checks along critical performance
   223  	// paths.
   224  	// The sweep will free the old allocBits and set allocBits to the
   225  	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
   226  	// out memory.
   227  	allocBits  *uint8
   228  	gcmarkBits *uint8
   229  
   230  	// sweep generation:
   231  	// if sweepgen == h->sweepgen - 2, the span needs sweeping
   232  	// if sweepgen == h->sweepgen - 1, the span is currently being swept
   233  	// if sweepgen == h->sweepgen, the span is swept and ready to use
   234  	// h->sweepgen is incremented by 2 after every GC
   235  
   236  	sweepgen    uint32
   237  	divMul      uint16     // for divide by elemsize - divMagic.mul
   238  	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
   239  	allocCount  uint16     // number of allocated objects
   240  	sizeclass   uint8      // size class
   241  	incache     bool       // being used by an mcache
   242  	state       mSpanState // _MSpanInUse etc.
   243  	needzero    uint8      // needs to be zeroed before allocation
   244  	divShift    uint8      // for divide by elemsize - divMagic.shift
   245  	divShift2   uint8      // for divide by elemsize - divMagic.shift2
   246  	elemsize    uintptr    // computed from sizeclass or from npages
   247  	unusedsince int64      // first time spotted by GC in _MSpanFree state
   248  	npreleased  uintptr    // number of pages released to the os
   249  	limit       uintptr    // end of data in span
   250  	speciallock mutex      // guards specials list
   251  	specials    *special   // linked list of special records sorted by offset.
   252  }
   253  
   254  func (s *mspan) base() uintptr {
   255  	return s.startAddr
   256  }
   257  
   258  func (s *mspan) layout() (size, n, total uintptr) {
   259  	total = s.npages << _PageShift
   260  	size = s.elemsize
   261  	if size > 0 {
   262  		n = total / size
   263  	}
   264  	return
   265  }
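// For example (illustrative numbers; the runtime's page size here is
// 8KB, _PageShift == 13): a 2-page span with elemsize 1024 has
// total == 16384, size == 1024, and n == 16 objects.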
   266  
   267  func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
   268  	h := (*mheap)(vh)
   269  	s := (*mspan)(p)
   270  	if len(h.allspans) >= cap(h.allspans) {
   271  		n := 64 * 1024 / sys.PtrSize
   272  		if n < cap(h.allspans)*3/2 {
   273  			n = cap(h.allspans) * 3 / 2
   274  		}
   275  		var new []*mspan
   276  		sp := (*slice)(unsafe.Pointer(&new))
   277  		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
   278  		if sp.array == nil {
   279  			throw("runtime: cannot allocate memory")
   280  		}
   281  		sp.len = len(h.allspans)
   282  		sp.cap = n
   283  		if len(h.allspans) > 0 {
   284  			copy(new, h.allspans)
   285  		}
   286  		oldAllspans := h.allspans
   287  		h.allspans = new
   288  		if len(oldAllspans) != 0 {
   289  			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
   290  		}
   291  	}
   292  	h.allspans = append(h.allspans, s)
   293  }
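// The growth policy above, shown in isolation (a sketch, assuming a
// 64-bit platform where sys.PtrSize == 8): the backing array starts at
// 64KB (8192 entries) and grows by 1.5x once that is exceeded.
//
//	func grownCap(oldCap int) int {
//		n := 64 * 1024 / 8 // 8192 entries to start
//		if n < oldCap*3/2 {
//			n = oldCap * 3 / 2 // then grow by 1.5x
//		}
//		return n
//	}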
   294  
   295  // inheap reports whether b is a pointer into a (potentially dead) heap object.
   296  // It returns false for pointers into stack spans.
   297  // Non-preemptible because it is used by write barriers.
   298  //go:nowritebarrier
   299  //go:nosplit
   300  func inheap(b uintptr) bool {
   301  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   302  		return false
   303  	}
   304  	// Not a beginning of a block, consult span table to find the block beginning.
   305  	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
   306  	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
   307  		return false
   308  	}
   309  	return true
   310  }
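// The lookup above is plain page arithmetic. A sketch of the indexing
// (assuming the runtime's 8KB pages, _PageShift == 13):
//
//	pageID := (b - mheap_.arena_start) >> _PageShift
//	s := mheap_.spans[pageID]
//
// so a pointer 24KB past arena_start indexes page 3 of the arena.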
   311  
   312  // inHeapOrStack is a variant of inheap that returns true for pointers into stack spans.
   313  //go:nowritebarrier
   314  //go:nosplit
   315  func inHeapOrStack(b uintptr) bool {
   316  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   317  		return false
   318  	}
   319  	// Not a beginning of a block, consult span table to find the block beginning.
   320  	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
   321  	if s == nil || b < s.base() {
   322  		return false
   323  	}
   324  	switch s.state {
   325  	case mSpanInUse:
   326  		return b < s.limit
   327  	case _MSpanStack:
   328  		return b < s.base()+s.npages<<_PageShift
   329  	default:
   330  		return false
   331  	}
   332  }
   333  
   334  // TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
   335  // Use the functions instead.
   336  
   337  // spanOf returns the span of p. If p does not point into the heap or
   338  // no span contains p, spanOf returns nil.
   339  func spanOf(p uintptr) *mspan {
   340  	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
   341  		return nil
   342  	}
   343  	return spanOfUnchecked(p)
   344  }
   345  
   346  // spanOfUnchecked is equivalent to spanOf, but the caller must ensure
   347  // that p points into the heap (that is, mheap_.arena_start <= p <
   348  // mheap_.arena_used).
   349  func spanOfUnchecked(p uintptr) *mspan {
   350  	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
   351  }
   352  
   353  func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
   354  	_g_ := getg()
   355  
   356  	_g_.m.mcache.local_nlookup++
   357  	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
   358  		// purge cache stats to prevent overflow
   359  		lock(&mheap_.lock)
   360  		purgecachedstats(_g_.m.mcache)
   361  		unlock(&mheap_.lock)
   362  	}
   363  
   364  	s := mheap_.lookupMaybe(unsafe.Pointer(v))
   365  	if sp != nil {
   366  		*sp = s
   367  	}
   368  	if s == nil {
   369  		if base != nil {
   370  			*base = 0
   371  		}
   372  		if size != nil {
   373  			*size = 0
   374  		}
   375  		return 0
   376  	}
   377  
   378  	p := s.base()
   379  	if s.sizeclass == 0 {
   380  		// Large object.
   381  		if base != nil {
   382  			*base = p
   383  		}
   384  		if size != nil {
   385  			*size = s.npages << _PageShift
   386  		}
   387  		return 1
   388  	}
   389  
   390  	n := s.elemsize
   391  	if base != nil {
   392  		i := (v - p) / n
   393  		*base = p + i*n
   394  	}
   395  	if size != nil {
   396  		*size = n
   397  	}
   398  
   399  	return 1
   400  }
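// For example (illustrative numbers): in a small-object span with
// elemsize n == 48 and v == p+100, i == 100/48 == 2, so *base is set
// to p+96, the start of the object containing v.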
   401  
   402  // Initialize the heap.
   403  func (h *mheap) init(spansStart, spansBytes uintptr) {
   404  	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
   405  	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
   406  	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
   407  	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
   408  
   409  	// Don't zero mspan allocations. Background sweeping can
   410  	// inspect a span concurrently with allocating it, so it's
   411  	// important that the span's sweepgen survive across freeing
   412  	// and re-allocating a span to prevent background sweeping
   413  	// from improperly cas'ing it from 0.
   414  	//
   415  	// This is safe because mspan contains no heap pointers.
   416  	h.spanalloc.zero = false
   417  
   418  	// h->mapcache needs no init
   419  	for i := range h.free {
   420  		h.free[i].init()
   421  		h.busy[i].init()
   422  	}
   423  
   424  	h.freelarge.init()
   425  	h.busylarge.init()
   426  	for i := range h.central {
   427  		h.central[i].mcentral.init(int32(i))
   428  	}
   429  
   430  	sp := (*slice)(unsafe.Pointer(&h.spans))
   431  	sp.array = unsafe.Pointer(spansStart)
   432  	sp.len = 0
   433  	sp.cap = int(spansBytes / sys.PtrSize)
   434  }
   435  
   436  // mapSpans makes sure that the spans array is mapped
   437  // up to the new value of arena_used.
   438  //
   439  // It must be called with the expected new value of arena_used,
   440  // *before* h.arena_used has been updated.
   441  // Waiting to update arena_used until after the memory has been mapped
   442  // avoids faults when other threads try to access the spans array
   443  // immediately after observing the change to arena_used.
   444  func (h *mheap) mapSpans(arena_used uintptr) {
   445  	// Map spans array, PageSize at a time.
   446  	n := arena_used
   447  	n -= h.arena_start
   448  	n = n / _PageSize * sys.PtrSize
   449  	n = round(n, physPageSize)
   450  	need := n / unsafe.Sizeof(h.spans[0])
   451  	have := uintptr(len(h.spans))
   452  	if have >= need {
   453  		return
   454  	}
   455  	h.spans = h.spans[:need]
   456  	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
   457  }
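// For example (illustrative numbers, assuming 8KB pages and 8-byte
// pointers): growing arena_used by 64MB requires 64MB/8KB == 8192 more
// spans entries, so 64KB of the spans array is newly mapped, rounded
// to the physical page size.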
   458  
   459  // Sweeps spans in list until it reclaims at least npages pages into the heap.
   460  // Returns the actual number of pages reclaimed.
   461  func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
   462  	n := uintptr(0)
   463  	sg := mheap_.sweepgen
   464  retry:
   465  	for s := list.first; s != nil; s = s.next {
   466  		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
   467  			list.remove(s)
   468  			// swept spans are at the end of the list
   469  			list.insertBack(s)
   470  			unlock(&h.lock)
   471  			snpages := s.npages
   472  			if s.sweep(false) {
   473  				n += snpages
   474  			}
   475  			lock(&h.lock)
   476  			if n >= npages {
   477  				return n
   478  			}
   479  			// the span could have been moved elsewhere
   480  			goto retry
   481  		}
   482  		if s.sweepgen == sg-1 {
   483  		// the span is being swept by the background sweeper, skip
   484  			continue
   485  		}
   486  		// already swept empty span,
   487  		// all subsequent ones must also be either swept or in the process of sweeping
   488  		break
   489  	}
   490  	return n
   491  }
   492  
   493  // Sweeps and reclaims at least npage pages into heap.
   494  // Called before allocating npage pages.
   495  func (h *mheap) reclaim(npage uintptr) {
   496  	// First try to sweep busy spans with large objects of size >= npage;
   497  	// this has a good chance of reclaiming the necessary space.
   498  	for i := int(npage); i < len(h.busy); i++ {
   499  		if h.reclaimList(&h.busy[i], npage) != 0 {
   500  			return // Bingo!
   501  		}
   502  	}
   503  
   504  	// Then -- even larger objects.
   505  	if h.reclaimList(&h.busylarge, npage) != 0 {
   506  		return // Bingo!
   507  	}
   508  
   509  	// Now try smaller objects.
   510  	// One such object is not enough, so we need to reclaim several of them.
   511  	reclaimed := uintptr(0)
   512  	for i := 0; i < int(npage) && i < len(h.busy); i++ {
   513  		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
   514  		if reclaimed >= npage {
   515  			return
   516  		}
   517  	}
   518  
   519  	// Now sweep everything that is not yet swept.
   520  	unlock(&h.lock)
   521  	for {
   522  		n := sweepone()
   523  		if n == ^uintptr(0) { // all spans are swept
   524  			break
   525  		}
   526  		reclaimed += n
   527  		if reclaimed >= npage {
   528  			break
   529  		}
   530  	}
   531  	lock(&h.lock)
   532  }
   533  
   534  // Allocate a new span of npage pages from the heap for GC'd memory
   535  // and record its size class in the heap's span map.
   536  func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
   537  	_g_ := getg()
   538  	if _g_ != _g_.m.g0 {
   539  		throw("_mheap_alloc not on g0 stack")
   540  	}
   541  	lock(&h.lock)
   542  
   543  	// To prevent excessive heap growth, before allocating n pages
   544  	// we need to sweep and reclaim at least n pages.
   545  	if h.sweepdone == 0 {
   546  		// TODO(austin): This tends to sweep a large number of
   547  		// spans in order to find a few completely free spans
   548  		// (for example, in the garbage benchmark, this sweeps
   549  	// ~30x the number of pages it's trying to allocate).
   550  		// If GC kept a bit for whether there were any marks
   551  		// in a span, we could release these free spans
   552  		// at the end of GC and eliminate this entirely.
   553  		h.reclaim(npage)
   554  	}
   555  
   556  	// transfer stats from cache to global
   557  	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
   558  	_g_.m.mcache.local_scan = 0
   559  	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
   560  	_g_.m.mcache.local_tinyallocs = 0
   561  
   562  	s := h.allocSpanLocked(npage)
   563  	if s != nil {
   564  		// Record span info, because gc needs to be
   565  		// able to map an interior pointer to its containing span.
   566  		atomic.Store(&s.sweepgen, h.sweepgen)
   567  		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
   568  		s.state = _MSpanInUse
   569  		s.allocCount = 0
   570  		s.sizeclass = uint8(sizeclass)
   571  		if sizeclass == 0 {
   572  			s.elemsize = s.npages << _PageShift
   573  			s.divShift = 0
   574  			s.divMul = 0
   575  			s.divShift2 = 0
   576  			s.baseMask = 0
   577  		} else {
   578  			s.elemsize = uintptr(class_to_size[sizeclass])
   579  			m := &class_to_divmagic[sizeclass]
   580  			s.divShift = m.shift
   581  			s.divMul = m.mul
   582  			s.divShift2 = m.shift2
   583  			s.baseMask = m.baseMask
   584  		}
   585  
   586  		// update stats, sweep lists
   587  		h.pagesInUse += uint64(npage)
   588  		if large {
   589  			memstats.heap_objects++
   590  			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
   591  			// Swept spans are at the end of lists.
   592  			if s.npages < uintptr(len(h.free)) {
   593  				h.busy[s.npages].insertBack(s)
   594  			} else {
   595  				h.busylarge.insertBack(s)
   596  			}
   597  		}
   598  	}
   599  	// heap_scan and heap_live were updated.
   600  	if gcBlackenEnabled != 0 {
   601  		gcController.revise()
   602  	}
   603  
   604  	if trace.enabled {
   605  		traceHeapAlloc()
   606  	}
   607  
   608  	// h.spans is accessed concurrently without synchronization
   609  	// from other threads. Hence, there must be a store/store
   610  	// barrier here to ensure the writes to h.spans above happen
   611  	// before the caller can publish a pointer p to an object
   612  	// allocated from s. As soon as this happens, the garbage
   613  	// collector running on another processor could read p and
   614  	// look up s in h.spans. The unlock acts as the barrier to
   615  	// order these writes. On the read side, the data dependency
   616  	// between p and the index in h.spans orders the reads.
   617  	unlock(&h.lock)
   618  	return s
   619  }
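// For example, a large (sizeclass == 0) allocation of 4 pages yields a
// span with elemsize == 4<<_PageShift == 32KB, while a sizeclass > 0
// span takes its elemsize and divMagic fields from the size-class
// tables.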
   620  
   621  func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
   622  	// Don't do any operations that lock the heap on the G stack.
   623  	// It might trigger stack growth, and the stack growth code needs
   624  	// to be able to allocate heap.
   625  	var s *mspan
   626  	systemstack(func() {
   627  		s = h.alloc_m(npage, sizeclass, large)
   628  	})
   629  
   630  	if s != nil {
   631  		if needzero && s.needzero != 0 {
   632  			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
   633  		}
   634  		s.needzero = 0
   635  	}
   636  	return s
   637  }
   638  
   639  func (h *mheap) allocStack(npage uintptr) *mspan {
   640  	_g_ := getg()
   641  	if _g_ != _g_.m.g0 {
   642  		throw("mheap_allocstack not on g0 stack")
   643  	}
   644  	lock(&h.lock)
   645  	s := h.allocSpanLocked(npage)
   646  	if s != nil {
   647  		s.state = _MSpanStack
   648  		s.stackfreelist = 0
   649  		s.allocCount = 0
   650  		memstats.stacks_inuse += uint64(s.npages << _PageShift)
   651  	}
   652  
   653  	// This unlock acts as a release barrier. See mHeap_Alloc_m.
   654  	unlock(&h.lock)
   655  	return s
   656  }
   657  
   658  // Allocates a span of the given size.  h must be locked.
   659  // The returned span has been removed from the
   660  // free list, but its state is still MSpanFree.
   661  func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
   662  	var list *mSpanList
   663  	var s *mspan
   664  
   665  	// Try in fixed-size lists up to max.
   666  	for i := int(npage); i < len(h.free); i++ {
   667  		list = &h.free[i]
   668  		if !list.isEmpty() {
   669  			s = list.first
   670  			goto HaveSpan
   671  		}
   672  	}
   673  
   674  	// Best fit in list of large spans.
   675  	list = &h.freelarge
   676  	s = h.allocLarge(npage)
   677  	if s == nil {
   678  		if !h.grow(npage) {
   679  			return nil
   680  		}
   681  		s = h.allocLarge(npage)
   682  		if s == nil {
   683  			return nil
   684  		}
   685  	}
   686  
   687  HaveSpan:
   688  	// Mark span in use.
   689  	if s.state != _MSpanFree {
   690  		throw("MHeap_AllocLocked - MSpan not free")
   691  	}
   692  	if s.npages < npage {
   693  		throw("MHeap_AllocLocked - bad npages")
   694  	}
   695  	list.remove(s)
   696  	if s.inList() {
   697  		throw("still in list")
   698  	}
   699  	if s.npreleased > 0 {
   700  		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
   701  		memstats.heap_released -= uint64(s.npreleased << _PageShift)
   702  		s.npreleased = 0
   703  	}
   704  
   705  	if s.npages > npage {
   706  		// Trim extra and put it back in the heap.
   707  		t := (*mspan)(h.spanalloc.alloc())
   708  		t.init(s.base()+npage<<_PageShift, s.npages-npage)
   709  		s.npages = npage
   710  		p := (t.base() - h.arena_start) >> _PageShift
   711  		if p > 0 {
   712  			h.spans[p-1] = s
   713  		}
   714  		h.spans[p] = t
   715  		h.spans[p+t.npages-1] = t
   716  		t.needzero = s.needzero
   717  		s.state = _MSpanStack // prevent coalescing with s
   718  		t.state = _MSpanStack
   719  		h.freeSpanLocked(t, false, false, s.unusedsince)
   720  		s.state = _MSpanFree
   721  	}
   722  	s.unusedsince = 0
   723  
   724  	p := (s.base() - h.arena_start) >> _PageShift
   725  	for n := uintptr(0); n < npage; n++ {
   726  		h.spans[p+n] = s
   727  	}
   728  
   729  	memstats.heap_inuse += uint64(npage << _PageShift)
   730  	memstats.heap_idle -= uint64(npage << _PageShift)
   731  
   732  	//println("spanalloc", hex(s.start<<_PageShift))
   733  	if s.inList() {
   734  		throw("still in list")
   735  	}
   736  	return s
   737  }
   738  
   739  // Allocate a span of exactly npage pages from the list of large spans.
   740  func (h *mheap) allocLarge(npage uintptr) *mspan {
   741  	return bestFit(&h.freelarge, npage, nil)
   742  }
   743  
   744  // Search list for smallest span with >= npage pages.
   745  // If there are multiple smallest spans, take the one
   746  // with the earliest starting address.
   747  func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
   748  	for s := list.first; s != nil; s = s.next {
   749  		if s.npages < npage {
   750  			continue
   751  		}
   752  		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.base() < best.base()) {
   753  			best = s
   754  		}
   755  	}
   756  	return best
   757  }
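// For example, given free spans of 5, 3, and 3 pages and npage == 2,
// bestFit returns whichever 3-page span has the lower base address.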
   758  
   759  // Try to add at least npage pages of memory to the heap,
   760  // returning whether it worked.
   761  //
   762  // h must be locked.
   763  func (h *mheap) grow(npage uintptr) bool {
   764  	// Ask for a big chunk, to reduce the number of mappings
   765  	// the operating system needs to track; also amortizes
   766  	// the overhead of an operating system mapping.
   767  	// Allocate a multiple of 64kB.
   768  	npage = round(npage, (64<<10)/_PageSize)
   769  	ask := npage << _PageShift
   770  	if ask < _HeapAllocChunk {
   771  		ask = _HeapAllocChunk
   772  	}
   773  
   774  	v := h.sysAlloc(ask)
   775  	if v == nil {
   776  		if ask > npage<<_PageShift {
   777  			ask = npage << _PageShift
   778  			v = h.sysAlloc(ask)
   779  		}
   780  		if v == nil {
   781  			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
   782  			return false
   783  		}
   784  	}
   785  
   786  	// Create a fake "in use" span and free it, so that the
   787  	// right coalescing happens.
   788  	s := (*mspan)(h.spanalloc.alloc())
   789  	s.init(uintptr(v), ask>>_PageShift)
   790  	p := (s.base() - h.arena_start) >> _PageShift
   791  	for i := p; i < p+s.npages; i++ {
   792  		h.spans[i] = s
   793  	}
   794  	atomic.Store(&s.sweepgen, h.sweepgen)
   795  	s.state = _MSpanInUse
   796  	h.pagesInUse += uint64(s.npages)
   797  	h.freeSpanLocked(s, false, true, 0)
   798  	return true
   799  }
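// For example (illustrative numbers): a request to grow by 3 pages is
// rounded up to 8 pages (one 64KB unit with 8KB pages) and then raised
// to _HeapAllocChunk (1MB), so the heap actually grows by 128 pages.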
   800  
   801  // Look up the span at the given address.
   802  // The address is guaranteed to be in the map
   803  // and to be the start or end of a span.
   804  func (h *mheap) lookup(v unsafe.Pointer) *mspan {
   805  	p := uintptr(v)
   806  	p -= h.arena_start
   807  	return h.spans[p>>_PageShift]
   808  }
   809  
   810  // Look up the span at the given address.
   811  // Address is *not* guaranteed to be in map
   812  // and may be anywhere in the span.
   813  // Map entries for the middle of a span are only
   814  // valid for allocated spans. Free spans may have
   815  // other garbage in their middles, so we have to
   816  // check for that.
   817  func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
   818  	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
   819  		return nil
   820  	}
   821  	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
   822  	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
   823  		return nil
   824  	}
   825  	return s
   826  }
   827  
   828  // Free the span back into the heap.
   829  func (h *mheap) freeSpan(s *mspan, acct int32) {
   830  	systemstack(func() {
   831  		mp := getg().m
   832  		lock(&h.lock)
   833  		memstats.heap_scan += uint64(mp.mcache.local_scan)
   834  		mp.mcache.local_scan = 0
   835  		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
   836  		mp.mcache.local_tinyallocs = 0
   837  		if msanenabled {
   838  			// Tell msan that this entire span is no longer in use.
   839  			base := unsafe.Pointer(s.base())
   840  			bytes := s.npages << _PageShift
   841  			msanfree(base, bytes)
   842  		}
   843  		if acct != 0 {
   844  			memstats.heap_objects--
   845  		}
   846  		if gcBlackenEnabled != 0 {
   847  			// heap_scan changed.
   848  			gcController.revise()
   849  		}
   850  		h.freeSpanLocked(s, true, true, 0)
   851  		unlock(&h.lock)
   852  	})
   853  }
   854  
   855  func (h *mheap) freeStack(s *mspan) {
   856  	_g_ := getg()
   857  	if _g_ != _g_.m.g0 {
   858  		throw("mheap_freestack not on g0 stack")
   859  	}
   860  	s.needzero = 1
   861  	lock(&h.lock)
   862  	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
   863  	h.freeSpanLocked(s, true, true, 0)
   864  	unlock(&h.lock)
   865  }
   866  
   867  // s must be on a busy list (h.busy or h.busylarge) or unlinked.
   868  func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
   869  	switch s.state {
   870  	case _MSpanStack:
   871  		if s.allocCount != 0 {
   872  			throw("MHeap_FreeSpanLocked - invalid stack free")
   873  		}
   874  	case _MSpanInUse:
   875  		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
   876  			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
   877  			throw("MHeap_FreeSpanLocked - invalid free")
   878  		}
   879  		h.pagesInUse -= uint64(s.npages)
   880  	default:
   881  		throw("MHeap_FreeSpanLocked - invalid span state")
   882  	}
   883  
   884  	if acctinuse {
   885  		memstats.heap_inuse -= uint64(s.npages << _PageShift)
   886  	}
   887  	if acctidle {
   888  		memstats.heap_idle += uint64(s.npages << _PageShift)
   889  	}
   890  	s.state = _MSpanFree
   891  	if s.inList() {
   892  		h.busyList(s.npages).remove(s)
   893  	}
   894  
   895  	// Stamp newly unused spans. The scavenger will use that
   896  	// info to potentially give back some pages to the OS.
   897  	s.unusedsince = unusedsince
   898  	if unusedsince == 0 {
   899  		s.unusedsince = nanotime()
   900  	}
   901  	s.npreleased = 0
   902  
   903  	// Coalesce with earlier, later spans.
   904  	p := (s.base() - h.arena_start) >> _PageShift
   905  	if p > 0 {
   906  		t := h.spans[p-1]
   907  		if t != nil && t.state == _MSpanFree {
   908  			s.startAddr = t.startAddr
   909  			s.npages += t.npages
   910  			s.npreleased = t.npreleased // absorb released pages
   911  			s.needzero |= t.needzero
   912  			p -= t.npages
   913  			h.spans[p] = s
   914  			h.freeList(t.npages).remove(t)
   915  			t.state = _MSpanDead
   916  			h.spanalloc.free(unsafe.Pointer(t))
   917  		}
   918  	}
   919  	if (p + s.npages) < uintptr(len(h.spans)) {
   920  		t := h.spans[p+s.npages]
   921  		if t != nil && t.state == _MSpanFree {
   922  			s.npages += t.npages
   923  			s.npreleased += t.npreleased
   924  			s.needzero |= t.needzero
   925  			h.spans[p+s.npages-1] = s
   926  			h.freeList(t.npages).remove(t)
   927  			t.state = _MSpanDead
   928  			h.spanalloc.free(unsafe.Pointer(t))
   929  		}
   930  	}
   931  
   932  	// Insert s into appropriate list.
   933  	h.freeList(s.npages).insert(s)
   934  }
   935  
   936  func (h *mheap) freeList(npages uintptr) *mSpanList {
   937  	if npages < uintptr(len(h.free)) {
   938  		return &h.free[npages]
   939  	}
   940  	return &h.freelarge
   941  }
   942  
   943  func (h *mheap) busyList(npages uintptr) *mSpanList {
   944  	if npages < uintptr(len(h.free)) {
   945  		return &h.busy[npages]
   946  	}
   947  	return &h.busylarge
   948  }
   949  
   950  func scavengelist(list *mSpanList, now, limit uint64) uintptr {
   951  	if list.isEmpty() {
   952  		return 0
   953  	}
   954  
   955  	var sumreleased uintptr
   956  	for s := list.first; s != nil; s = s.next {
   957  		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
   958  			start := s.base()
   959  			end := start + s.npages<<_PageShift
   960  			if physPageSize > _PageSize {
   961  				// We can only release pages in
   962  				// physPageSize blocks, so round start
   963  				// and end in. (Otherwise, madvise
   964  				// will round them *out* and release
   965  				// more memory than we want.)
   966  				start = (start + physPageSize - 1) &^ (physPageSize - 1)
   967  				end &^= physPageSize - 1
   968  				if end <= start {
   969  					// start and end don't span a
   970  					// whole physical page.
   971  					continue
   972  				}
   973  			}
   974  			len := end - start
   975  
   976  			released := len - (s.npreleased << _PageShift)
   977  			if physPageSize > _PageSize && released == 0 {
   978  				continue
   979  			}
   980  			memstats.heap_released += uint64(released)
   981  			sumreleased += released
   982  			s.npreleased = len >> _PageShift
   983  			sysUnused(unsafe.Pointer(start), len)
   984  		}
   985  	}
   986  	return sumreleased
   987  }
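// For example, with 8KB runtime pages on a system with 64KB physical
// pages, a free span covering only 24KB cannot contain a whole
// physical page once start is rounded up and end is rounded down, so
// it is skipped rather than over-released.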
   988  
   989  func (h *mheap) scavenge(k int32, now, limit uint64) {
   990  	lock(&h.lock)
   991  	var sumreleased uintptr
   992  	for i := 0; i < len(h.free); i++ {
   993  		sumreleased += scavengelist(&h.free[i], now, limit)
   994  	}
   995  	sumreleased += scavengelist(&h.freelarge, now, limit)
   996  	unlock(&h.lock)
   997  
   998  	if debug.gctrace > 0 {
   999  		if sumreleased > 0 {
  1000  			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
  1001  		}
  1002  		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
  1003  		// But we can't call ReadMemStats on g0 holding locks.
  1004  		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
  1005  	}
  1006  }
  1007  
  1008  //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
  1009  func runtime_debug_freeOSMemory() {
  1010  	gcStart(gcForceBlockMode, false)
  1011  	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
  1012  }
  1013  
  1014  // Initialize a new span with the given start and npages.
  1015  func (span *mspan) init(base uintptr, npages uintptr) {
  1016  	// span is *not* zeroed.
  1017  	span.next = nil
  1018  	span.prev = nil
  1019  	span.list = nil
  1020  	span.startAddr = base
  1021  	span.npages = npages
  1022  	span.allocCount = 0
  1023  	span.sizeclass = 0
  1024  	span.incache = false
  1025  	span.elemsize = 0
  1026  	span.state = _MSpanDead
  1027  	span.unusedsince = 0
  1028  	span.npreleased = 0
  1029  	span.speciallock.key = 0
  1030  	span.specials = nil
  1031  	span.needzero = 0
  1032  	span.freeindex = 0
  1033  	span.allocBits = nil
  1034  	span.gcmarkBits = nil
  1035  }
  1036  
  1037  func (span *mspan) inList() bool {
  1038  	return span.list != nil
  1039  }
  1040  
  1041  // Initialize an empty doubly-linked list.
  1042  func (list *mSpanList) init() {
  1043  	list.first = nil
  1044  	list.last = nil
  1045  }
  1046  
  1047  func (list *mSpanList) remove(span *mspan) {
  1048  	if span.list != list {
  1049  		println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
  1050  		throw("MSpanList_Remove")
  1051  	}
  1052  	if list.first == span {
  1053  		list.first = span.next
  1054  	} else {
  1055  		span.prev.next = span.next
  1056  	}
  1057  	if list.last == span {
  1058  		list.last = span.prev
  1059  	} else {
  1060  		span.next.prev = span.prev
  1061  	}
  1062  	span.next = nil
  1063  	span.prev = nil
  1064  	span.list = nil
  1065  }
  1066  
  1067  func (list *mSpanList) isEmpty() bool {
  1068  	return list.first == nil
  1069  }
  1070  
  1071  func (list *mSpanList) insert(span *mspan) {
  1072  	if span.next != nil || span.prev != nil || span.list != nil {
  1073  		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
  1074  		throw("MSpanList_Insert")
  1075  	}
  1076  	span.next = list.first
  1077  	if list.first != nil {
  1078  		// The list contains at least one span; link it in.
  1079  		// The last span in the list doesn't change.
  1080  		list.first.prev = span
  1081  	} else {
  1082  		// The list contains no spans, so this is also the last span.
  1083  		list.last = span
  1084  	}
  1085  	list.first = span
  1086  	span.list = list
  1087  }
  1088  
  1089  func (list *mSpanList) insertBack(span *mspan) {
  1090  	if span.next != nil || span.prev != nil || span.list != nil {
  1091  		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
  1092  		throw("MSpanList_InsertBack")
  1093  	}
  1094  	span.prev = list.last
  1095  	if list.last != nil {
  1096  		// The list contains at least one span.
  1097  		list.last.next = span
  1098  	} else {
  1099  		// The list contains no spans, so this is also the first span.
  1100  		list.first = span
  1101  	}
  1102  	list.last = span
  1103  	span.list = list
  1104  }
  1105  
  1106  const (
  1107  	_KindSpecialFinalizer = 1
  1108  	_KindSpecialProfile   = 2
  1109  	// Note: The finalizer special must be first because if we're freeing
  1110  	// an object, a finalizer special will cause the freeing operation
  1111  	// to abort, and we want to keep the other special records around
  1112  	// if that happens.
  1113  )
  1114  
  1115  //go:notinheap
  1116  type special struct {
  1117  	next   *special // linked list in span
  1118  	offset uint16   // span offset of object
  1119  	kind   byte     // kind of special
  1120  }
  1121  
  1122  // Adds the special record s to the list of special records for
  1123  // the object p. All fields of s should be filled in except for
  1124  // offset & next, which this routine will fill in.
  1125  // Returns true if the special was successfully added, false otherwise.
  1126  // (The add will fail only if a record with the same p and s->kind
  1127  //  already exists.)
  1128  func addspecial(p unsafe.Pointer, s *special) bool {
  1129  	span := mheap_.lookupMaybe(p)
  1130  	if span == nil {
  1131  		throw("addspecial on invalid pointer")
  1132  	}
  1133  
  1134  	// Ensure that the span is swept.
  1135  	// Sweeping accesses the specials list w/o locks, so we have
  1136  	// to synchronize with it. And it's just much safer.
  1137  	mp := acquirem()
  1138  	span.ensureSwept()
  1139  
  1140  	offset := uintptr(p) - span.base()
  1141  	kind := s.kind
  1142  
  1143  	lock(&span.speciallock)
  1144  
  1145  	// Find splice point, check for existing record.
  1146  	t := &span.specials
  1147  	for {
  1148  		x := *t
  1149  		if x == nil {
  1150  			break
  1151  		}
  1152  		if offset == uintptr(x.offset) && kind == x.kind {
  1153  			unlock(&span.speciallock)
  1154  			releasem(mp)
  1155  			return false // already exists
  1156  		}
  1157  		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
  1158  			break
  1159  		}
  1160  		t = &x.next
  1161  	}
  1162  
  1163  	// Splice in record, fill in offset.
  1164  	s.offset = uint16(offset)
  1165  	s.next = *t
  1166  	*t = s
  1167  	unlock(&span.speciallock)
  1168  	releasem(mp)
  1169  
  1170  	return true
  1171  }
  1172  
  1173  // Removes the Special record of the given kind for the object p.
  1174  // Returns the record if the record existed, nil otherwise.
  1175  // The caller must FixAlloc_Free the result.
  1176  func removespecial(p unsafe.Pointer, kind uint8) *special {
  1177  	span := mheap_.lookupMaybe(p)
  1178  	if span == nil {
  1179  		throw("removespecial on invalid pointer")
  1180  	}
  1181  
  1182  	// Ensure that the span is swept.
  1183  	// Sweeping accesses the specials list w/o locks, so we have
  1184  	// to synchronize with it. And it's just much safer.
  1185  	mp := acquirem()
  1186  	span.ensureSwept()
  1187  
  1188  	offset := uintptr(p) - span.base()
  1189  
  1190  	lock(&span.speciallock)
  1191  	t := &span.specials
  1192  	for {
  1193  		s := *t
  1194  		if s == nil {
  1195  			break
  1196  		}
  1197  		// This function is used for finalizers only, so we don't check for
  1198  		// "interior" specials (p must be exactly equal to s->offset).
  1199  		if offset == uintptr(s.offset) && kind == s.kind {
  1200  			*t = s.next
  1201  			unlock(&span.speciallock)
  1202  			releasem(mp)
  1203  			return s
  1204  		}
  1205  		t = &s.next
  1206  	}
  1207  	unlock(&span.speciallock)
  1208  	releasem(mp)
  1209  	return nil
  1210  }
  1211  
  1212  // The described object has a finalizer set for it.
  1213  //
  1214  // specialfinalizer is allocated from non-GC'd memory, so any heap
  1215  // pointers must be specially handled.
  1216  //
  1217  //go:notinheap
  1218  type specialfinalizer struct {
  1219  	special special
  1220  	fn      *funcval // May be a heap pointer.
  1221  	nret    uintptr
  1222  	fint    *_type   // May be a heap pointer, but always live.
  1223  	ot      *ptrtype // May be a heap pointer, but always live.
  1224  }
  1225  
  1226  // Adds a finalizer to the object p. Returns true if it succeeded.
  1227  func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
  1228  	lock(&mheap_.speciallock)
  1229  	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
  1230  	unlock(&mheap_.speciallock)
  1231  	s.special.kind = _KindSpecialFinalizer
  1232  	s.fn = f
  1233  	s.nret = nret
  1234  	s.fint = fint
  1235  	s.ot = ot
  1236  	if addspecial(p, &s.special) {
  1237  		// This is responsible for maintaining the same
  1238  		// GC-related invariants as markrootSpans in any
  1239  		// situation where it's possible that markrootSpans
  1240  		// has already run but mark termination hasn't yet.
  1241  		if gcphase != _GCoff {
  1242  			_, base, _ := findObject(p)
  1243  			mp := acquirem()
  1244  			gcw := &mp.p.ptr().gcw
  1245  			// Mark everything reachable from the object
  1246  			// so it's retained for the finalizer.
  1247  			scanobject(uintptr(base), gcw)
  1248  			// Mark the finalizer itself, since the
  1249  			// special isn't part of the GC'd heap.
  1250  			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
  1251  			if gcBlackenPromptly {
  1252  				gcw.dispose()
  1253  			}
  1254  			releasem(mp)
  1255  		}
  1256  		return true
  1257  	}
  1258  
  1259  	// There was an old finalizer
  1260  	lock(&mheap_.speciallock)
  1261  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  1262  	unlock(&mheap_.speciallock)
  1263  	return false
  1264  }
  1265  
  1266  // Removes the finalizer (if any) from the object p.
  1267  func removefinalizer(p unsafe.Pointer) {
  1268  	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
  1269  	if s == nil {
  1270  		return // there wasn't a finalizer to remove
  1271  	}
  1272  	lock(&mheap_.speciallock)
  1273  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  1274  	unlock(&mheap_.speciallock)
  1275  }
  1276  
  1277  // The described object is being heap profiled.
  1278  //
  1279  //go:notinheap
  1280  type specialprofile struct {
  1281  	special special
  1282  	b       *bucket
  1283  }
  1284  
  1285  // Set the heap profile bucket associated with addr to b.
  1286  func setprofilebucket(p unsafe.Pointer, b *bucket) {
  1287  	lock(&mheap_.speciallock)
  1288  	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
  1289  	unlock(&mheap_.speciallock)
  1290  	s.special.kind = _KindSpecialProfile
  1291  	s.b = b
  1292  	if !addspecial(p, &s.special) {
  1293  		throw("setprofilebucket: profile already set")
  1294  	}
  1295  }
  1296  
  1297  // Do whatever cleanup needs to be done to deallocate s. It has
  1298  // already been unlinked from the MSpan specials list.
  1299  func freespecial(s *special, p unsafe.Pointer, size uintptr) {
  1300  	switch s.kind {
  1301  	case _KindSpecialFinalizer:
  1302  		sf := (*specialfinalizer)(unsafe.Pointer(s))
  1303  		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
  1304  		lock(&mheap_.speciallock)
  1305  		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
  1306  		unlock(&mheap_.speciallock)
  1307  	case _KindSpecialProfile:
  1308  		sp := (*specialprofile)(unsafe.Pointer(s))
  1309  		mProf_Free(sp.b, size)
  1310  		lock(&mheap_.speciallock)
  1311  		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
  1312  		unlock(&mheap_.speciallock)
  1313  	default:
  1314  		throw("bad special kind")
  1315  		panic("not reached")
  1316  	}
  1317  }
  1318  
  1319  const gcBitsChunkBytes = uintptr(64 << 10)
  1320  const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
  1321  
  1322  type gcBitsHeader struct {
  1323  	free uintptr // free is the index into bits of the next free byte.
  1324  	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
  1325  }
  1326  
  1327  //go:notinheap
  1328  type gcBits struct {
  1329  	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
  1330  	free uintptr // free is the index into bits of the next free byte.
  1331  	next *gcBits
  1332  	bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
  1333  }
  1334  
  1335  var gcBitsArenas struct {
  1336  	lock     mutex
  1337  	free     *gcBits
  1338  	next     *gcBits
  1339  	current  *gcBits
  1340  	previous *gcBits
  1341  }
  1342  
  1343  // newMarkBits returns a pointer to 8 byte aligned bytes
  1344  // to be used for a span's mark bits.
  1345  func newMarkBits(nelems uintptr) *uint8 {
  1346  	lock(&gcBitsArenas.lock)
  1347  	blocksNeeded := uintptr((nelems + 63) / 64)
  1348  	bytesNeeded := blocksNeeded * 8
  1349  	if gcBitsArenas.next == nil ||
  1350  		gcBitsArenas.next.free+bytesNeeded > uintptr(len(gcBits{}.bits)) {
  1351  		// Allocate a new arena.
  1352  		fresh := newArena()
  1353  		fresh.next = gcBitsArenas.next
  1354  		gcBitsArenas.next = fresh
  1355  	}
  1356  	if gcBitsArenas.next.free >= gcBitsChunkBytes {
  1357  		println("runtime: gcBitsArenas.next.free=", gcBitsArenas.next.free, gcBitsChunkBytes)
  1358  		throw("markBits overflow")
  1359  	}
  1360  	result := &gcBitsArenas.next.bits[gcBitsArenas.next.free]
  1361  	gcBitsArenas.next.free += bytesNeeded
  1362  	unlock(&gcBitsArenas.lock)
  1363  	return result
  1364  }
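// For example (illustrative numbers): nelems == 200 needs
// (200+63)/64 == 4 uint64 blocks, so bytesNeeded == 32 bytes are
// carved out of the current arena at gcBitsArenas.next.free.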
  1365  
  1366  // newAllocBits returns a pointer to 8 byte aligned bytes
  1367  // to be used for this span's alloc bits.
  1368  // newAllocBits is used to provide newly initialized spans
  1369  // with allocation bits. For spans that are not being
  1370  // initialized, the mark bits are repurposed as allocation
  1371  // bits when the span is swept.
  1372  func newAllocBits(nelems uintptr) *uint8 {
  1373  	return newMarkBits(nelems)
  1374  }
  1375  
  1376  // nextMarkBitArenaEpoch establishes a new epoch for the arenas
  1377  // holding the mark bits. The arenas are named relative to the
  1378  // current GC cycle, which is demarcated by the call to finishsweep_m.
  1379  //
  1380  // All current spans have been swept.
  1381  // During that sweep each span allocated room for its gcmarkBits in
  1382  // the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
  1383  // where the GC will mark objects; after each span is swept, these bits
  1384  // are used to allocate objects.
  1385  // gcBitsArenas.current becomes gcBitsArenas.previous where the span's
  1386  // gcAllocBits live until all the spans have been swept during this GC cycle.
  1387  // The span's sweep extinguishes all the references to gcBitsArenas.previous
  1388  // by pointing gcAllocBits into the gcBitsArenas.current.
  1389  // The gcBitsArenas.previous is released to the gcBitsArenas.free list.
  1390  func nextMarkBitArenaEpoch() {
  1391  	lock(&gcBitsArenas.lock)
  1392  	if gcBitsArenas.previous != nil {
  1393  		if gcBitsArenas.free == nil {
  1394  			gcBitsArenas.free = gcBitsArenas.previous
  1395  		} else {
  1396  			// Find end of previous arenas.
  1397  			last := gcBitsArenas.previous
  1398  			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
  1399  			}
  1400  			last.next = gcBitsArenas.free
  1401  			gcBitsArenas.free = gcBitsArenas.previous
  1402  		}
  1403  	}
  1404  	gcBitsArenas.previous = gcBitsArenas.current
  1405  	gcBitsArenas.current = gcBitsArenas.next
  1406  	gcBitsArenas.next = nil // newMarkBits calls newArena when needed
  1407  	unlock(&gcBitsArenas.lock)
  1408  }
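// In effect, the rotation performed each cycle is: previous -> free,
// current -> previous, next -> current, with next starting empty.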
  1409  
  1410  // newArena allocates and zeroes a gcBits arena.
  1411  func newArena() *gcBits {
  1412  	var result *gcBits
  1413  	if gcBitsArenas.free == nil {
  1414  		result = (*gcBits)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
  1415  		if result == nil {
  1416  			throw("runtime: cannot allocate memory")
  1417  		}
  1418  	} else {
  1419  		result = gcBitsArenas.free
  1420  		gcBitsArenas.free = gcBitsArenas.free.next
  1421  		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
  1422  	}
  1423  	result.next = nil
  1424  	// If result.bits is not 8 byte aligned adjust index so
  1425  	// that &result.bits[result.free] is 8 byte aligned.
  1426  	if uintptr(unsafe.Offsetof(gcBits{}.bits))&7 == 0 {
  1427  		result.free = 0
  1428  	} else {
  1429  		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
  1430  	}
  1431  	return result
  1432  }