github.com/c0deoo1/golang1.5@v0.0.0-20220525150107-c87c805d4593/src/runtime/mheap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Page heap.
     6  //
     7  // See malloc.go for overview.
     8  
     9  package runtime
    10  
    11  import "unsafe"
    12  
    13  // Main malloc heap.
    14  // The heap itself is the "free[]" array of span lists and the "freelarge" list,
    15  // but all the other global data is here too.
    16  type mheap struct {
    17  	lock      mutex
    18  	free      [_MaxMHeapList]mspan // free lists of given length
    19  	freelarge mspan                // free lists length >= _MaxMHeapList
    20  	busy      [_MaxMHeapList]mspan // busy lists of large objects of given length
    21  	busylarge mspan                // busy lists of large objects length >= _MaxMHeapList
    22  	allspans  **mspan              // all spans out there
    23  	gcspans   **mspan              // copy of allspans referenced by gc marker or sweeper
    24  	nspan     uint32
    25  	sweepgen  uint32 // sweep generation, see comment in mspan
    26  	sweepdone uint32 // all spans are swept
    27  	// span lookup
    28  	spans        **mspan
    29  	spans_mapped uintptr
    30  
    31  	// Proportional sweep
    32  	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
    33  	pagesSwept        uint64  // pages swept this cycle; updated atomically
    34  	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
    35  
    36  	// Malloc stats.
    37  	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
    38  	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
    39  	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
    40  
    41  	// range of addresses we might see in the heap
    42  	bitmap         uintptr
    43  	bitmap_mapped  uintptr
    44  	arena_start    uintptr
    45  	arena_used     uintptr // always mHeap_Map{Bits,Spans} before updating
    46  	arena_end      uintptr
    47  	arena_reserved bool
    48  
    49  	// central free lists for small size classes.
    50  	// the padding makes sure that the MCentrals are
    51  	// spaced CacheLineSize bytes apart, so that each MCentral.lock
    52  	// gets its own cache line.
    53  	central [_NumSizeClasses]struct {
    54  		mcentral mcentral
    55  		pad      [_CacheLineSize]byte
    56  	}
    57  
    58  	spanalloc             fixalloc // allocator for span*
    59  	cachealloc            fixalloc // allocator for mcache*
    60  	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
    61  	specialprofilealloc   fixalloc // allocator for specialprofile*
    62  	speciallock           mutex    // lock for special record allocators.
    63  }
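        // Illustrative note (a sketch of logic that appears later in this file,
        // see mHeap_FreeSpanLocked and mHeap_Alloc_m): the fixed-size lists are
        // indexed by page count, so a span s ends up on
        //
        //	h.free[s.npages]  (or h.busy[s.npages])  if s.npages < _MaxMHeapList
        //	h.freelarge       (or h.busylarge)       otherwise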
    64  
    65  var mheap_ mheap
    66  
    67  // An MSpan is a run of pages.
    68  //
    69  // When a MSpan is in the heap free list, state == MSpanFree
    70  // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
    71  //
    72  // When a MSpan is allocated, state == MSpanInUse or MSpanStack
    73  // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
    74  
    75  // Every MSpan is in one doubly-linked list,
    76  // either one of the MHeap's free lists or one of the
    77  // MCentral's span lists.  We use empty MSpan structures as list heads.
    78  
    79  // An MSpan representing actual memory has state _MSpanInUse,
    80  // _MSpanStack, or _MSpanFree. Transitions between these states are
    81  // constrained as follows:
    82  //
    83  // * A span may transition from free to in-use or stack during any GC
    84  //   phase.
    85  //
    86  // * During sweeping (gcphase == _GCoff), a span may transition from
    87  //   in-use to free (as a result of sweeping) or stack to free (as a
    88  //   result of stacks being freed).
    89  //
    90  // * During GC (gcphase != _GCoff), a span *must not* transition from
    91  //   stack or in-use to free. Because concurrent GC may read a pointer
    92  //   and then look up its span, the span state must be monotonic.
    93  const (
    94  	_MSpanInUse = iota // allocated for garbage collected heap
    95  	_MSpanStack        // allocated for use by stack allocator
    96  	_MSpanFree
    97  	_MSpanListHead
    98  	_MSpanDead
    99  )
   100  
   101  type mspan struct {
   102  	next     *mspan    // in a span linked list
   103  	prev     *mspan    // in a span linked list
   104  	start    pageID    // starting page number
   105  	npages   uintptr   // number of pages in span
   106  	freelist gclinkptr // list of free objects
   107  	// sweep generation:
   108  	// if sweepgen == h->sweepgen - 2, the span needs sweeping
   109  	// if sweepgen == h->sweepgen - 1, the span is currently being swept
   110  	// if sweepgen == h->sweepgen, the span is swept and ready to use
   111  	// h->sweepgen is incremented by 2 after every GC
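        	// Worked example of the rules above (illustrative): if h->sweepgen
        	// is 6, a span with sweepgen 4 still needs sweeping, a span with
        	// sweepgen 5 is being swept right now, and a span with sweepgen 6
        	// is already swept. After the next GC h->sweepgen becomes 8 and all
        	// of those spans need sweeping again.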
   112  
   113  	sweepgen    uint32
   114  	divMul      uint32   // for divide by elemsize - divMagic.mul
   115  	ref         uint16   // capacity - number of objects in freelist
   116  	sizeclass   uint8    // size class
   117  	incache     bool     // being used by an mcache
   118  	state       uint8    // mspaninuse etc
   119  	needzero    uint8    // needs to be zeroed before allocation
   120  	divShift    uint8    // for divide by elemsize - divMagic.shift
   121  	divShift2   uint8    // for divide by elemsize - divMagic.shift2
   122  	elemsize    uintptr  // computed from sizeclass or from npages
   123  	unusedsince int64    // first time spotted by gc in mspanfree state
   124  	npreleased  uintptr  // number of pages released to the os
   125  	limit       uintptr  // end of data in span
   126  	speciallock mutex    // guards specials list
   127  	specials    *special // linked list of special records sorted by offset.
   128  	baseMask    uintptr  // if non-0, elemsize is a power of 2, & this will get object allocation base
   129  }
   130  
   131  func (s *mspan) base() uintptr {
   132  	return uintptr(s.start << _PageShift)
   133  }
   134  
   135  func (s *mspan) layout() (size, n, total uintptr) {
   136  	total = s.npages << _PageShift
   137  	size = s.elemsize
   138  	if size > 0 {
   139  		n = total / size
   140  	}
   141  	return
   142  }
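        // Illustrative example (assuming the usual 8KB heap pages, _PageShift == 13):
        // for a one-page span with elemsize 48, layout returns size = 48,
        // total = 8192 and n = 8192/48 = 170, i.e. the span holds 170 objects
        // with 32 bytes of tail waste.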
   143  
   144  var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
   145  
   146  // h_spans is a lookup table to map virtual address page IDs to *mspan.
   147  // For allocated spans, their pages map to the span itself.
   148  // For free spans, only the lowest and highest pages map to the span itself.  Internal
   149  // pages map to an arbitrary span.
   150  // For pages that have never been allocated, h_spans entries are nil.
   151  var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
   152  
   153  func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
   154  	h := (*mheap)(vh)
   155  	s := (*mspan)(p)
   156  	if len(h_allspans) >= cap(h_allspans) {
   157  		n := 64 * 1024 / ptrSize
   158  		if n < cap(h_allspans)*3/2 {
   159  			n = cap(h_allspans) * 3 / 2
   160  		}
   161  		var new []*mspan
   162  		sp := (*slice)(unsafe.Pointer(&new))
   163  		sp.array = sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys)
   164  		if sp.array == nil {
   165  			throw("runtime: cannot allocate memory")
   166  		}
   167  		sp.len = len(h_allspans)
   168  		sp.cap = n
   169  		if len(h_allspans) > 0 {
   170  			copy(new, h_allspans)
   171  			// Don't free the old array if it's referenced by sweep.
   172  			// See the comment in mgc.go.
   173  			if h.allspans != mheap_.gcspans {
   174  				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*ptrSize, &memstats.other_sys)
   175  			}
   176  		}
   177  		h_allspans = new
   178  		h.allspans = (**mspan)(unsafe.Pointer(sp.array))
   179  	}
   180  	h_allspans = append(h_allspans, s)
   181  	h.nspan = uint32(len(h_allspans))
   182  }
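        // Sizing sketch (illustrative): on a 64-bit system the backing array
        // starts at 64*1024/8 = 8192 entries and, once that is exceeded, grows
        // to 1.5x its current capacity, always via sysAlloc rather than the
        // regular heap allocator.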
   183  
   184  // inheap reports whether b is a pointer into a (potentially dead) heap object.
   185  // It returns false for pointers into stack spans.
   186  // Non-preemptible because it is used by write barriers.
   187  //go:nowritebarrier
   188  //go:nosplit
   189  func inheap(b uintptr) bool {
   190  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   191  		return false
   192  	}
   193  	// Not a beginning of a block, consult span table to find the block beginning.
   194  	k := b >> _PageShift
   195  	x := k
   196  	x -= mheap_.arena_start >> _PageShift
   197  	s := h_spans[x]
   198  	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
   199  		return false
   200  	}
   201  	return true
   202  }
   203  
   204  // TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
   205  // Use the functions instead.
   206  
   207  // spanOf returns the span of p. If p does not point into the heap or
   208  // no span contains p, spanOf returns nil.
   209  func spanOf(p uintptr) *mspan {
   210  	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
   211  		return nil
   212  	}
   213  	return spanOfUnchecked(p)
   214  }
   215  
   216  // spanOfUnchecked is equivalent to spanOf, but the caller must ensure
   217  // that p points into the heap (that is, mheap_.arena_start <= p <
   218  // mheap_.arena_used).
   219  func spanOfUnchecked(p uintptr) *mspan {
   220  	return h_spans[(p-mheap_.arena_start)>>_PageShift]
   221  }
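        // Minimal usage sketch (illustrative; the open-coded form mentioned in
        // the TODO above can be seen in inheap and mHeap_LookupMaybe):
        //
        //	if s := spanOf(p); s != nil && s.state == _MSpanInUse {
        //		// p falls inside an in-use heap span s
        //		// (inheap additionally checks s.start and s.limit)
        //	}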
   222  
   223  func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
   224  	_g_ := getg()
   225  
   226  	_g_.m.mcache.local_nlookup++
   227  	if ptrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
   228  		// purge cache stats to prevent overflow
   229  		lock(&mheap_.lock)
   230  		purgecachedstats(_g_.m.mcache)
   231  		unlock(&mheap_.lock)
   232  	}
   233  
   234  	s := mHeap_LookupMaybe(&mheap_, unsafe.Pointer(v))
   235  	if sp != nil {
   236  		*sp = s
   237  	}
   238  	if s == nil {
   239  		if base != nil {
   240  			*base = 0
   241  		}
   242  		if size != nil {
   243  			*size = 0
   244  		}
   245  		return 0
   246  	}
   247  
   248  	p := uintptr(s.start) << _PageShift
   249  	if s.sizeclass == 0 {
   250  		// Large object.
   251  		if base != nil {
   252  			*base = p
   253  		}
   254  		if size != nil {
   255  			*size = s.npages << _PageShift
   256  		}
   257  		return 1
   258  	}
   259  
   260  	n := s.elemsize
   261  	if base != nil {
   262  		i := (uintptr(v) - uintptr(p)) / n
   263  		*base = p + i*n
   264  	}
   265  	if size != nil {
   266  		*size = n
   267  	}
   268  
   269  	return 1
   270  }
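        // Worked example (illustrative): for a small-object span starting at
        // page address p with elemsize n = 48, mlookup of v = p+100 computes
        // i = 100/48 = 2 and sets *base = p + 2*48 = p+96 and *size = 48,
        // i.e. v is rounded down to the start of the element containing it.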
   271  
   272  // Initialize the heap.
   273  func mHeap_Init(h *mheap, spans_size uintptr) {
   274  	fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
   275  	fixAlloc_Init(&h.cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
   276  	fixAlloc_Init(&h.specialfinalizeralloc, unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
   277  	fixAlloc_Init(&h.specialprofilealloc, unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
   278  
   279  	// h->mapcache needs no init
   280  	for i := range h.free {
   281  		mSpanList_Init(&h.free[i])
   282  		mSpanList_Init(&h.busy[i])
   283  	}
   284  
   285  	mSpanList_Init(&h.freelarge)
   286  	mSpanList_Init(&h.busylarge)
   287  	for i := range h.central {
   288  		mCentral_Init(&h.central[i].mcentral, int32(i))
   289  	}
   290  
   291  	sp := (*slice)(unsafe.Pointer(&h_spans))
   292  	sp.array = unsafe.Pointer(h.spans)
   293  	sp.len = int(spans_size / ptrSize)
   294  	sp.cap = int(spans_size / ptrSize)
   295  }
   296  
   297  // mHeap_MapSpans makes sure that the spans are mapped
   298  // up to the new value of arena_used.
   299  //
   300  // It must be called with the expected new value of arena_used,
   301  // *before* h.arena_used has been updated.
   302  // Waiting to update arena_used until after the memory has been mapped
   303  // avoids faults when other threads try to access the spans array immediately
   304  // after observing the change to arena_used.
   305  func mHeap_MapSpans(h *mheap, arena_used uintptr) {
   306  	// Map spans array, PageSize at a time.
   307  	n := arena_used
   308  	n -= h.arena_start
   309  	n = n / _PageSize * ptrSize
   310  	n = round(n, _PhysPageSize)
   311  	if h.spans_mapped >= n {
   312  		return
   313  	}
   314  	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
   315  	h.spans_mapped = n
   316  }
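        // Sizing sketch (illustrative): every heap page gets one *mspan slot,
        // so n above is (arena_used - arena_start) / _PageSize * ptrSize,
        // rounded up to the physical page size. On a 64-bit system with 8KB
        // pages, covering 1GB of arena maps (1<<30)/8192*8 = 1MB of the spans
        // array.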
   317  
   318  // Sweeps spans in list until it reclaims at least npages pages into the heap.
   319  // Returns the actual number of pages reclaimed.
   320  func mHeap_ReclaimList(h *mheap, list *mspan, npages uintptr) uintptr {
   321  	n := uintptr(0)
   322  	sg := mheap_.sweepgen
   323  retry:
   324  	for s := list.next; s != list; s = s.next {
   325  		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
   326  			mSpanList_Remove(s)
   327  			// swept spans are at the end of the list
   328  			mSpanList_InsertBack(list, s)
   329  			unlock(&h.lock)
   330  			snpages := s.npages
   331  			if mSpan_Sweep(s, false) {
   332  				n += snpages
   333  			}
   334  			lock(&h.lock)
   335  			if n >= npages {
   336  				return n
   337  			}
   338  			// the span could have been moved elsewhere
   339  			goto retry
   340  		}
   341  		if s.sweepgen == sg-1 {
   342  		// the span is being swept by the background sweeper, skip
   343  			continue
   344  		}
   345  		// already swept empty span,
   346  		// all subsequent ones must also be either swept or in the process of being swept
   347  		break
   348  	}
   349  	return n
   350  }
   351  
   352  // Sweeps and reclaims at least npage pages into heap.
   353  // Called before allocating npage pages.
   354  func mHeap_Reclaim(h *mheap, npage uintptr) {
   355  	// First try to sweep busy spans with large objects of size >= npage;
   356  	// this has a good chance of reclaiming the necessary space.
   357  	for i := int(npage); i < len(h.busy); i++ {
   358  		if mHeap_ReclaimList(h, &h.busy[i], npage) != 0 {
   359  			return // Bingo!
   360  		}
   361  	}
   362  
   363  	// Then -- even larger objects.
   364  	if mHeap_ReclaimList(h, &h.busylarge, npage) != 0 {
   365  		return // Bingo!
   366  	}
   367  
   368  	// Now try smaller objects.
   369  	// One such object is not enough, so we need to reclaim several of them.
   370  	reclaimed := uintptr(0)
   371  	for i := 0; i < int(npage) && i < len(h.busy); i++ {
   372  		reclaimed += mHeap_ReclaimList(h, &h.busy[i], npage-reclaimed)
   373  		if reclaimed >= npage {
   374  			return
   375  		}
   376  	}
   377  
   378  	// Now sweep everything that is not yet swept.
   379  	unlock(&h.lock)
   380  	for {
   381  		n := sweepone()
   382  		if n == ^uintptr(0) { // all spans are swept
   383  			break
   384  		}
   385  		reclaimed += n
   386  		if reclaimed >= npage {
   387  			break
   388  		}
   389  	}
   390  	lock(&h.lock)
   391  }
   392  
   393  // Allocate a new span of npage pages from the heap for GC'd memory
   394  // and record its size class in the HeapMap and HeapMapCache.
   395  func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
   396  	_g_ := getg()
   397  	if _g_ != _g_.m.g0 {
   398  		throw("_mheap_alloc not on g0 stack")
   399  	}
   400  	lock(&h.lock)
   401  
   402  	// To prevent excessive heap growth, before allocating n pages
   403  	// we need to sweep and reclaim at least n pages.
   404  	if h.sweepdone == 0 {
   405  		// TODO(austin): This tends to sweep a large number of
   406  		// spans in order to find a few completely free spans
   407  		// (for example, in the garbage benchmark, this sweeps
   408  		// ~30x the number of pages it's trying to allocate).
   409  		// If GC kept a bit for whether there were any marks
   410  		// in a span, we could release these free spans
   411  		// at the end of GC and eliminate this entirely.
   412  		mHeap_Reclaim(h, npage)
   413  	}
   414  
   415  	// transfer stats from cache to global
   416  	memstats.heap_live += uint64(_g_.m.mcache.local_cachealloc)
   417  	_g_.m.mcache.local_cachealloc = 0
   418  	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
   419  	_g_.m.mcache.local_scan = 0
   420  	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
   421  	_g_.m.mcache.local_tinyallocs = 0
   422  
   423  	gcController.revise()
   424  
   425  	s := mHeap_AllocSpanLocked(h, npage)
   426  	if s != nil {
   427  		// Record span info, because gc needs to be
   428  		// able to map interior pointer to containing span.
   429  		atomicstore(&s.sweepgen, h.sweepgen)
   430  		s.state = _MSpanInUse
   431  		s.freelist = 0
   432  		s.ref = 0
   433  		s.sizeclass = uint8(sizeclass)
   434  		if sizeclass == 0 {
   435  			s.elemsize = s.npages << _PageShift
   436  			s.divShift = 0
   437  			s.divMul = 0
   438  			s.divShift2 = 0
   439  			s.baseMask = 0
   440  		} else {
   441  			s.elemsize = uintptr(class_to_size[sizeclass])
   442  			m := &class_to_divmagic[sizeclass]
   443  			s.divShift = m.shift
   444  			s.divMul = m.mul
   445  			s.divShift2 = m.shift2
   446  			s.baseMask = m.baseMask
   447  		}
   448  
   449  		// update stats, sweep lists
   450  		if large {
   451  			memstats.heap_objects++
   452  			memstats.heap_live += uint64(npage << _PageShift)
   453  			// Swept spans are at the end of lists.
   454  			if s.npages < uintptr(len(h.free)) {
   455  				mSpanList_InsertBack(&h.busy[s.npages], s)
   456  			} else {
   457  				mSpanList_InsertBack(&h.busylarge, s)
   458  			}
   459  		}
   460  	}
   461  	if trace.enabled {
   462  		traceHeapAlloc()
   463  	}
   464  
   465  	// h_spans is accessed concurrently without synchronization
   466  	// from other threads. Hence, there must be a store/store
   467  	// barrier here to ensure the writes to h_spans above happen
   468  	// before the caller can publish a pointer p to an object
   469  	// allocated from s. As soon as this happens, the garbage
   470  	// collector running on another processor could read p and
   471  	// look up s in h_spans. The unlock acts as the barrier to
   472  	// order these writes. On the read side, the data dependency
   473  	// between p and the index in h_spans orders the reads.
   474  	unlock(&h.lock)
   475  	return s
   476  }
   477  
   478  func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
   479  	// Don't do any operations that lock the heap on the G stack.
   480  	// It might trigger stack growth, and the stack growth code needs
   481  	// to be able to allocate heap.
   482  	var s *mspan
   483  	systemstack(func() {
   484  		s = mHeap_Alloc_m(h, npage, sizeclass, large)
   485  	})
   486  
   487  	if s != nil {
   488  		if needzero && s.needzero != 0 {
   489  			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
   490  		}
   491  		s.needzero = 0
   492  	}
   493  	return s
   494  }
   495  
   496  func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
   497  	_g_ := getg()
   498  	if _g_ != _g_.m.g0 {
   499  		throw("mheap_allocstack not on g0 stack")
   500  	}
   501  	lock(&h.lock)
   502  	s := mHeap_AllocSpanLocked(h, npage)
   503  	if s != nil {
   504  		s.state = _MSpanStack
   505  		s.freelist = 0
   506  		s.ref = 0
   507  		memstats.stacks_inuse += uint64(s.npages << _PageShift)
   508  	}
   509  
   510  	// This unlock acts as a release barrier. See mHeap_Alloc_m.
   511  	unlock(&h.lock)
   512  	return s
   513  }
   514  
   515  // Allocates a span of the given size.  h must be locked.
   516  // The returned span has been removed from the
   517  // free list, but its state is still MSpanFree.
   518  func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
   519  	var s *mspan
   520  
   521  	// Try in fixed-size lists up to max.
   522  	for i := int(npage); i < len(h.free); i++ {
   523  		if !mSpanList_IsEmpty(&h.free[i]) {
   524  			s = h.free[i].next
   525  			goto HaveSpan
   526  		}
   527  	}
   528  
   529  	// Best fit in list of large spans.
   530  	s = mHeap_AllocLarge(h, npage)
   531  	if s == nil {
   532  		if !mHeap_Grow(h, npage) {
   533  			return nil
   534  		}
   535  		s = mHeap_AllocLarge(h, npage)
   536  		if s == nil {
   537  			return nil
   538  		}
   539  	}
   540  
   541  HaveSpan:
   542  	// Mark span in use.
   543  	if s.state != _MSpanFree {
   544  		throw("MHeap_AllocLocked - MSpan not free")
   545  	}
   546  	if s.npages < npage {
   547  		throw("MHeap_AllocLocked - bad npages")
   548  	}
   549  	mSpanList_Remove(s)
   550  	if s.next != nil || s.prev != nil {
   551  		throw("still in list")
   552  	}
   553  	if s.npreleased > 0 {
   554  		sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
   555  		memstats.heap_released -= uint64(s.npreleased << _PageShift)
   556  		s.npreleased = 0
   557  	}
   558  
   559  	if s.npages > npage {
   560  		// Trim extra and put it back in the heap.
   561  		t := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
   562  		mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
   563  		s.npages = npage
   564  		p := uintptr(t.start)
   565  		p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   566  		if p > 0 {
   567  			h_spans[p-1] = s
   568  		}
   569  		h_spans[p] = t
   570  		h_spans[p+t.npages-1] = t
   571  		t.needzero = s.needzero
   572  		s.state = _MSpanStack // prevent coalescing with s
   573  		t.state = _MSpanStack
   574  		mHeap_FreeSpanLocked(h, t, false, false, s.unusedsince)
   575  		s.state = _MSpanFree
   576  	}
   577  	s.unusedsince = 0
   578  
   579  	p := uintptr(s.start)
   580  	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   581  	for n := uintptr(0); n < npage; n++ {
   582  		h_spans[p+n] = s
   583  	}
   584  
   585  	memstats.heap_inuse += uint64(npage << _PageShift)
   586  	memstats.heap_idle -= uint64(npage << _PageShift)
   587  
   588  	//println("spanalloc", hex(s.start<<_PageShift))
   589  	if s.next != nil || s.prev != nil {
   590  		throw("still in list")
   591  	}
   592  	return s
   593  }
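        // Trimming example (illustrative): asking for 3 pages when the chosen
        // free span s has 10 pages splits off a 7-page tail t: s keeps the
        // first 3 pages, t gets the remaining 7, h_spans is updated so t's
        // first and last pages map to t while the page just before t still
        // maps to s, and t goes back onto the free lists via
        // mHeap_FreeSpanLocked.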
   594  
   595  // Allocate the best-fit span of at least npage pages from the list of large spans.
   596  func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
   597  	return bestFit(&h.freelarge, npage, nil)
   598  }
   599  
   600  // Search list for smallest span with >= npage pages.
   601  // If there are multiple smallest spans, take the one
   602  // with the earliest starting address.
   603  func bestFit(list *mspan, npage uintptr, best *mspan) *mspan {
   604  	for s := list.next; s != list; s = s.next {
   605  		if s.npages < npage {
   606  			continue
   607  		}
   608  		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
   609  			best = s
   610  		}
   611  	}
   612  	return best
   613  }
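        // Example (illustrative): with free spans of 200, 128 and 130 pages on
        // the list and npage = 100, bestFit returns the 128-page span, the
        // smallest one that still fits; ties on npages go to the span with the
        // lower starting address.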
   614  
   615  // Try to add at least npage pages of memory to the heap,
   616  // returning whether it worked.
   617  func mHeap_Grow(h *mheap, npage uintptr) bool {
   618  	// Ask for a big chunk, to reduce the number of mappings
   619  	// the operating system needs to track; also amortizes
   620  	// the overhead of an operating system mapping.
   621  	// Allocate a multiple of 64kB.
   622  	npage = round(npage, (64<<10)/_PageSize)
   623  	ask := npage << _PageShift
   624  	if ask < _HeapAllocChunk {
   625  		ask = _HeapAllocChunk
   626  	}
   627  
   628  	v := mHeap_SysAlloc(h, ask)
   629  	if v == nil {
   630  		if ask > npage<<_PageShift {
   631  			ask = npage << _PageShift
   632  			v = mHeap_SysAlloc(h, ask)
   633  		}
   634  		if v == nil {
   635  			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
   636  			return false
   637  		}
   638  	}
   639  
   640  	// Create a fake "in use" span and free it, so that the
   641  	// right coalescing happens.
   642  	s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
   643  	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
   644  	p := uintptr(s.start)
   645  	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   646  	for i := p; i < p+s.npages; i++ {
   647  		h_spans[i] = s
   648  	}
   649  	atomicstore(&s.sweepgen, h.sweepgen)
   650  	s.state = _MSpanInUse
   651  	mHeap_FreeSpanLocked(h, s, false, true, 0)
   652  	return true
   653  }
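        // Growth sketch (illustrative, assuming 8KB pages and the 1MB
        // _HeapAllocChunk from malloc.go): a request for 3 pages is first
        // rounded up to a 64KB multiple (8 pages), then the ask is raised to
        // _HeapAllocChunk, so the heap actually grows by 1MB = 128 pages and
        // the unused tail ends up back on the free lists when the fake span is
        // freed above.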
   654  
   655  // Look up the span at the given address.
   656  // Address is guaranteed to be in map
   657  // and is guaranteed to be start or end of span.
   658  func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
   659  	p := uintptr(v)
   660  	p -= uintptr(unsafe.Pointer(h.arena_start))
   661  	return h_spans[p>>_PageShift]
   662  }
   663  
   664  // Look up the span at the given address.
   665  // Address is *not* guaranteed to be in map
   666  // and may be anywhere in the span.
   667  // Map entries for the middle of a span are only
   668  // valid for allocated spans.  Free spans may have
   669  // other garbage in their middles, so we have to
   670  // check for that.
   671  func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
   672  	if uintptr(v) < uintptr(unsafe.Pointer(h.arena_start)) || uintptr(v) >= uintptr(unsafe.Pointer(h.arena_used)) {
   673  		return nil
   674  	}
   675  	p := uintptr(v) >> _PageShift
   676  	q := p
   677  	q -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
   678  	s := h_spans[q]
   679  	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
   680  		return nil
   681  	}
   682  	return s
   683  }
   684  
   685  // Free the span back into the heap.
   686  func mHeap_Free(h *mheap, s *mspan, acct int32) {
   687  	systemstack(func() {
   688  		mp := getg().m
   689  		lock(&h.lock)
   690  		memstats.heap_live += uint64(mp.mcache.local_cachealloc)
   691  		mp.mcache.local_cachealloc = 0
   692  		memstats.heap_scan += uint64(mp.mcache.local_scan)
   693  		mp.mcache.local_scan = 0
   694  		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
   695  		mp.mcache.local_tinyallocs = 0
   696  		if acct != 0 {
   697  			memstats.heap_objects--
   698  		}
   699  		gcController.revise()
   700  		mHeap_FreeSpanLocked(h, s, true, true, 0)
   701  		if trace.enabled {
   702  			traceHeapAlloc()
   703  		}
   704  		unlock(&h.lock)
   705  	})
   706  }
   707  
   708  func mHeap_FreeStack(h *mheap, s *mspan) {
   709  	_g_ := getg()
   710  	if _g_ != _g_.m.g0 {
   711  		throw("mheap_freestack not on g0 stack")
   712  	}
   713  	s.needzero = 1
   714  	lock(&h.lock)
   715  	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
   716  	mHeap_FreeSpanLocked(h, s, true, true, 0)
   717  	unlock(&h.lock)
   718  }
   719  
   720  func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsince int64) {
   721  	switch s.state {
   722  	case _MSpanStack:
   723  		if s.ref != 0 {
   724  			throw("MHeap_FreeSpanLocked - invalid stack free")
   725  		}
   726  	case _MSpanInUse:
   727  		if s.ref != 0 || s.sweepgen != h.sweepgen {
   728  			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
   729  			throw("MHeap_FreeSpanLocked - invalid free")
   730  		}
   731  	default:
   732  		throw("MHeap_FreeSpanLocked - invalid span state")
   733  	}
   734  
   735  	if acctinuse {
   736  		memstats.heap_inuse -= uint64(s.npages << _PageShift)
   737  	}
   738  	if acctidle {
   739  		memstats.heap_idle += uint64(s.npages << _PageShift)
   740  	}
   741  	s.state = _MSpanFree
   742  	mSpanList_Remove(s)
   743  
   744  	// Stamp newly unused spans. The scavenger will use that
   745  	// info to potentially give back some pages to the OS.
   746  	s.unusedsince = unusedsince
   747  	if unusedsince == 0 {
   748  		s.unusedsince = nanotime()
   749  	}
   750  	s.npreleased = 0
   751  
   752  	// Coalesce with earlier, later spans.
   753  	p := uintptr(s.start)
   754  	p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
   755  	if p > 0 {
   756  		t := h_spans[p-1]
   757  		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
   758  			s.start = t.start
   759  			s.npages += t.npages
   760  			s.npreleased = t.npreleased // absorb released pages
   761  			s.needzero |= t.needzero
   762  			p -= t.npages
   763  			h_spans[p] = s
   764  			mSpanList_Remove(t)
   765  			t.state = _MSpanDead
   766  			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
   767  		}
   768  	}
   769  	if (p+s.npages)*ptrSize < h.spans_mapped {
   770  		t := h_spans[p+s.npages]
   771  		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
   772  			s.npages += t.npages
   773  			s.npreleased += t.npreleased
   774  			s.needzero |= t.needzero
   775  			h_spans[p+s.npages-1] = s
   776  			mSpanList_Remove(t)
   777  			t.state = _MSpanDead
   778  			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
   779  		}
   780  	}
   781  
   782  	// Insert s into appropriate list.
   783  	if s.npages < uintptr(len(h.free)) {
   784  		mSpanList_Insert(&h.free[s.npages], s)
   785  	} else {
   786  		mSpanList_Insert(&h.freelarge, s)
   787  	}
   788  }
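        // Coalescing example (illustrative): freeing a 2-page span whose left
        // neighbor in h_spans is a free 3-page span and whose right neighbor is
        // a free 4-page span yields one 9-page free span; both neighbors are
        // unlinked, their mspan structs go back to spanalloc, and the merged
        // span is inserted into h.free[9].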
   789  
   790  func scavengelist(list *mspan, now, limit uint64) uintptr {
   791  	if _PhysPageSize > _PageSize {
   792  		// golang.org/issue/9993
   793  		// If the physical page size of the machine is larger than
   794  		// our logical heap page size, the kernel may round up the
   795  		// amount to be freed to its page size and corrupt the heap
   796  		// pages surrounding the unused block.
   797  		return 0
   798  	}
   799  
   800  	if mSpanList_IsEmpty(list) {
   801  		return 0
   802  	}
   803  
   804  	var sumreleased uintptr
   805  	for s := list.next; s != list; s = s.next {
   806  		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
   807  			released := (s.npages - s.npreleased) << _PageShift
   808  			memstats.heap_released += uint64(released)
   809  			sumreleased += released
   810  			s.npreleased = s.npages
   811  			sysUnused((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
   812  		}
   813  	}
   814  	return sumreleased
   815  }
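        // Example (illustrative): a 16-page span unused for longer than limit
        // with 4 pages already released is passed to sysUnused in full, and
        // heap_released/sumreleased grow by the 12 newly released pages
        // (96KB with 8KB pages).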
   816  
   817  func mHeap_Scavenge(k int32, now, limit uint64) {
   818  	h := &mheap_
   819  	lock(&h.lock)
   820  	var sumreleased uintptr
   821  	for i := 0; i < len(h.free); i++ {
   822  		sumreleased += scavengelist(&h.free[i], now, limit)
   823  	}
   824  	sumreleased += scavengelist(&h.freelarge, now, limit)
   825  	unlock(&h.lock)
   826  
   827  	if debug.gctrace > 0 {
   828  		if sumreleased > 0 {
   829  			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
   830  		}
   831  		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
   832  		// But we can't call ReadMemStats on g0 holding locks.
   833  		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
   834  	}
   835  }
   836  
   837  //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
   838  func runtime_debug_freeOSMemory() {
   839  	startGC(gcForceBlockMode, false)
   840  	systemstack(func() { mHeap_Scavenge(-1, ^uint64(0), 0) })
   841  }
   842  
   843  // Initialize a new span with the given start and npages.
   844  func mSpan_Init(span *mspan, start pageID, npages uintptr) {
   845  	span.next = nil
   846  	span.prev = nil
   847  	span.start = start
   848  	span.npages = npages
   849  	span.freelist = 0
   850  	span.ref = 0
   851  	span.sizeclass = 0
   852  	span.incache = false
   853  	span.elemsize = 0
   854  	span.state = _MSpanDead
   855  	span.unusedsince = 0
   856  	span.npreleased = 0
   857  	span.speciallock.key = 0
   858  	span.specials = nil
   859  	span.needzero = 0
   860  }
   861  
   862  // Initialize an empty doubly-linked list.
   863  func mSpanList_Init(list *mspan) {
   864  	list.state = _MSpanListHead
   865  	list.next = list
   866  	list.prev = list
   867  }
   868  
   869  func mSpanList_Remove(span *mspan) {
   870  	if span.prev == nil && span.next == nil {
   871  		return
   872  	}
   873  	span.prev.next = span.next
   874  	span.next.prev = span.prev
   875  	span.prev = nil
   876  	span.next = nil
   877  }
   878  
   879  func mSpanList_IsEmpty(list *mspan) bool {
   880  	return list.next == list
   881  }
   882  
   883  func mSpanList_Insert(list *mspan, span *mspan) {
   884  	if span.next != nil || span.prev != nil {
   885  		println("failed MSpanList_Insert", span, span.next, span.prev)
   886  		throw("MSpanList_Insert")
   887  	}
   888  	span.next = list.next
   889  	span.prev = list
   890  	span.next.prev = span
   891  	span.prev.next = span
   892  }
   893  
   894  func mSpanList_InsertBack(list *mspan, span *mspan) {
   895  	if span.next != nil || span.prev != nil {
   896  		println("failed MSpanList_InsertBack", span, span.next, span.prev)
   897  		throw("MSpanList_InsertBack")
   898  	}
   899  	span.next = list
   900  	span.prev = list.prev
   901  	span.next.prev = span
   902  	span.prev.next = span
   903  }
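        // List layout note (illustrative): a list head is itself an mspan with
        // state _MSpanListHead whose next/prev point back at itself when the
        // list is empty. mSpanList_Insert links a span in right after the head
        // (front of the list); mSpanList_InsertBack links it in right before
        // the head (back of the list).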
   904  
   905  const (
   906  	_KindSpecialFinalizer = 1
   907  	_KindSpecialProfile   = 2
   908  	// Note: The finalizer special must be first because if we're freeing
   909  	// an object, a finalizer special will cause the freeing operation
   910  	// to abort, and we want to keep the other special records around
   911  	// if that happens.
   912  )
   913  
   914  type special struct {
   915  	next   *special // linked list in span
   916  	offset uint16   // span offset of object
   917  	kind   byte     // kind of special
   918  }
   919  
   920  // Adds the special record s to the list of special records for
   921  // the object p.  All fields of s should be filled in except for
   922  // offset & next, which this routine will fill in.
   923  // Returns true if the special was successfully added, false otherwise.
   924  // (The add will fail only if a record with the same p and s->kind
   925  //  already exists.)
   926  func addspecial(p unsafe.Pointer, s *special) bool {
   927  	span := mHeap_LookupMaybe(&mheap_, p)
   928  	if span == nil {
   929  		throw("addspecial on invalid pointer")
   930  	}
   931  
   932  	// Ensure that the span is swept.
   933  	// GC accesses specials list w/o locks. And it's just much safer.
   934  	mp := acquirem()
   935  	mSpan_EnsureSwept(span)
   936  
   937  	offset := uintptr(p) - uintptr(span.start<<_PageShift)
   938  	kind := s.kind
   939  
   940  	lock(&span.speciallock)
   941  
   942  	// Find splice point, check for existing record.
   943  	t := &span.specials
   944  	for {
   945  		x := *t
   946  		if x == nil {
   947  			break
   948  		}
   949  		if offset == uintptr(x.offset) && kind == x.kind {
   950  			unlock(&span.speciallock)
   951  			releasem(mp)
   952  			return false // already exists
   953  		}
   954  		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
   955  			break
   956  		}
   957  		t = &x.next
   958  	}
   959  
   960  	// Splice in record, fill in offset.
   961  	s.offset = uint16(offset)
   962  	s.next = *t
   963  	*t = s
   964  	unlock(&span.speciallock)
   965  	releasem(mp)
   966  
   967  	return true
   968  }
   969  
   970  // Removes the Special record of the given kind for the object p.
   971  // Returns the record if the record existed, nil otherwise.
   972  // The caller must FixAlloc_Free the result.
   973  func removespecial(p unsafe.Pointer, kind uint8) *special {
   974  	span := mHeap_LookupMaybe(&mheap_, p)
   975  	if span == nil {
   976  		throw("removespecial on invalid pointer")
   977  	}
   978  
   979  	// Ensure that the span is swept.
   980  	// GC accesses specials list w/o locks. And it's just much safer.
   981  	mp := acquirem()
   982  	mSpan_EnsureSwept(span)
   983  
   984  	offset := uintptr(p) - uintptr(span.start<<_PageShift)
   985  
   986  	lock(&span.speciallock)
   987  	t := &span.specials
   988  	for {
   989  		s := *t
   990  		if s == nil {
   991  			break
   992  		}
   993  		// This function is used for finalizers only, so we don't check for
   994  		// "interior" specials (p must be exactly equal to s->offset).
   995  		if offset == uintptr(s.offset) && kind == s.kind {
   996  			*t = s.next
   997  			unlock(&span.speciallock)
   998  			releasem(mp)
   999  			return s
  1000  		}
  1001  		t = &s.next
  1002  	}
  1003  	unlock(&span.speciallock)
  1004  	releasem(mp)
  1005  	return nil
  1006  }
  1007  
  1008  // The described object has a finalizer set for it.
  1009  type specialfinalizer struct {
  1010  	special special
  1011  	fn      *funcval
  1012  	nret    uintptr
  1013  	fint    *_type
  1014  	ot      *ptrtype
  1015  }
  1016  
  1017  // Adds a finalizer to the object p.  Returns true if it succeeded.
  1018  func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
  1019  	lock(&mheap_.speciallock)
  1020  	s := (*specialfinalizer)(fixAlloc_Alloc(&mheap_.specialfinalizeralloc))
  1021  	unlock(&mheap_.speciallock)
  1022  	s.special.kind = _KindSpecialFinalizer
  1023  	s.fn = f
  1024  	s.nret = nret
  1025  	s.fint = fint
  1026  	s.ot = ot
  1027  	if addspecial(p, &s.special) {
  1028  		return true
  1029  	}
  1030  
  1031  	// There was an old finalizer
  1032  	lock(&mheap_.speciallock)
  1033  	fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
  1034  	unlock(&mheap_.speciallock)
  1035  	return false
  1036  }
  1037  
  1038  // Removes the finalizer (if any) from the object p.
  1039  func removefinalizer(p unsafe.Pointer) {
  1040  	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
  1041  	if s == nil {
  1042  		return // there wasn't a finalizer to remove
  1043  	}
  1044  	lock(&mheap_.speciallock)
  1045  	fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
  1046  	unlock(&mheap_.speciallock)
  1047  }
  1048  
  1049  // The described object is being heap profiled.
  1050  type specialprofile struct {
  1051  	special special
  1052  	b       *bucket
  1053  }
  1054  
  1055  // Set the heap profile bucket associated with p to b.
  1056  func setprofilebucket(p unsafe.Pointer, b *bucket) {
  1057  	lock(&mheap_.speciallock)
  1058  	s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
  1059  	unlock(&mheap_.speciallock)
  1060  	s.special.kind = _KindSpecialProfile
  1061  	s.b = b
  1062  	if !addspecial(p, &s.special) {
  1063  		throw("setprofilebucket: profile already set")
  1064  	}
  1065  }
  1066  
  1067  // Do whatever cleanup needs to be done to deallocate s.  It has
  1068  // already been unlinked from the MSpan specials list.
  1069  // Returns true if we should keep working on deallocating p.
  1070  func freespecial(s *special, p unsafe.Pointer, size uintptr, freed bool) bool {
  1071  	switch s.kind {
  1072  	case _KindSpecialFinalizer:
  1073  		sf := (*specialfinalizer)(unsafe.Pointer(s))
  1074  		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
  1075  		lock(&mheap_.speciallock)
  1076  		fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(sf))
  1077  		unlock(&mheap_.speciallock)
  1078  		return false // don't free p until finalizer is done
  1079  	case _KindSpecialProfile:
  1080  		sp := (*specialprofile)(unsafe.Pointer(s))
  1081  		mProf_Free(sp.b, size, freed)
  1082  		lock(&mheap_.speciallock)
  1083  		fixAlloc_Free(&mheap_.specialprofilealloc, (unsafe.Pointer)(sp))
  1084  		unlock(&mheap_.speciallock)
  1085  		return true
  1086  	default:
  1087  		throw("bad special kind")
  1088  		panic("not reached")
  1089  	}
  1090  }