github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/mheap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mSpanList                // free lists length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
	allspans  **mspan                  // all spans out there
	gcspans   **mspan                  // copy of allspans referenced by gc marker or sweeper
	nspan     uint32
	sweepgen  uint32 // sweep generation, see comment in mspan
	sweepdone uint32 // all spans are swept
	// span lookup
	spans        **mspan
	spans_mapped uintptr

	// Proportional sweep
	pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
	pagesSwept        uint64  // pages swept this cycle; updated atomically
	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap         uintptr
	bitmap_mapped  uintptr
	arena_start    uintptr
	arena_used     uintptr // always call mapBits and mapSpans before updating
	arena_end      uintptr
	arena_reserved bool

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	central [_NumSizeClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.
}

var mheap_ mheap

// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or stack to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   stack or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
const (
	_MSpanInUse = iota // allocated for garbage collected heap
	_MSpanStack        // allocated for use by stack allocator
	_MSpanFree
	_MSpanDead
)
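
// A minimal sketch (illustrative, not part of the runtime) of the
// transition rules described above: from free, a span may become in-use
// or stack at any time; the reverse direction is only legal while
// sweeping (gcphase == _GCoff), passed here as the sweeping parameter.
func exampleSpanTransitionOK(from, to uint8, sweeping bool) bool {
	if from == _MSpanFree {
		return to == _MSpanInUse || to == _MSpanStack
	}
	if (from == _MSpanInUse || from == _MSpanStack) && to == _MSpanFree {
		return sweeping
	}
	return false
}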

// mSpanList heads a linked list of spans.
//
// Linked list structure is based on BSD's "tail queue" data structure.
type mSpanList struct {
	first *mspan  // first span in list, or nil if none
	last  **mspan // last span's next field, or first if none
}

type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev **mspan    // previous span's next field, or list head's first field if none
	list *mSpanList // For debugging. TODO: Remove.

	start    pageID    // starting page number
	npages   uintptr   // number of pages in span
	freelist gclinkptr // list of free objects
	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint32   // for divide by elemsize - divMagic.mul
	ref         uint16   // capacity - number of objects in freelist
	sizeclass   uint8    // size class
	incache     bool     // being used by an mcache
	state       uint8    // mspaninuse etc
	needzero    uint8    // needs to be zeroed before allocation
	divShift    uint8    // for divide by elemsize - divMagic.shift
	divShift2   uint8    // for divide by elemsize - divMagic.shift2
	elemsize    uintptr  // computed from sizeclass or from npages
	unusedsince int64    // first time spotted by gc in mspanfree state
	npreleased  uintptr  // number of pages released to the os
	limit       uintptr  // end of data in span
	speciallock mutex    // guards specials list
	specials    *special // linked list of special records sorted by offset.
	baseMask    uintptr  // if non-0, elemsize is a power of 2, & this will get object allocation base
}
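
// A minimal sketch, not in the real runtime, of how the sweepgen rules in
// the comment above are read: classify a span relative to the heap's
// current sweep generation.
func exampleSweepState(s *mspan, h *mheap) string {
	switch atomic.Load(&s.sweepgen) {
	case h.sweepgen - 2:
		return "needs sweeping"
	case h.sweepgen - 1:
		return "being swept"
	case h.sweepgen:
		return "swept and ready to use"
	}
	return "stale (more than one GC behind)"
}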

func (s *mspan) base() uintptr {
	return uintptr(s.start << _PageShift)
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}
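
// Worked example for layout above (illustrative): assuming the usual 8KB
// pages (_PageShift == 13), a one-page span of 48-byte objects has
// total == 8192 and n == 8192/48 == 170, leaving 32 bytes of tail waste.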

var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go

// h_spans is a lookup table to map virtual address page IDs to *mspan.
// For allocated spans, their pages map to the span itself.
// For free spans, only the lowest and highest pages map to the span itself.  Internal
// pages map to an arbitrary span.
// For pages that have never been allocated, h_spans entries are nil.
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h_allspans) >= cap(h_allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h_allspans)*3/2 {
			n = cap(h_allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h_allspans)
		sp.cap = n
		if len(h_allspans) > 0 {
			copy(new, h_allspans)
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc.go.
			if h.allspans != mheap_.gcspans {
				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
			}
		}
		h_allspans = new
		h.allspans = (**mspan)(unsafe.Pointer(sp.array))
	}
	h_allspans = append(h_allspans, s)
	h.nspan = uint32(len(h_allspans))
}
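
// Worked example for recordspan above (illustrative): on a 64-bit system
// the first backing array holds 64*1024/8 == 8192 entries; once that
// fills, capacity grows by 1.5x (12288, 18432, ...), and the old array is
// kept rather than sysFree'd when the sweeper may still reference it
// (the gcspans check above).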

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	k := b >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := h_spans[x]
	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return h_spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := uintptr(s.start) << _PageShift
	if s.sizeclass == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (uintptr(v) - uintptr(p)) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}
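
// A hypothetical caller (illustrative only): resolve an interior pointer
// to the base and size of the object containing it via mlookup above.
func exampleResolve(v uintptr) (base, size uintptr, ok bool) {
	if mlookup(v, &base, &size, nil) == 0 {
		return 0, 0, false // v does not point into the heap
	}
	return base, size, true
}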

// Initialize the heap.
func (h *mheap) init(spans_size uintptr) {
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.freelarge.init()
	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(int32(i))
	}

	sp := (*slice)(unsafe.Pointer(&h_spans))
	sp.array = unsafe.Pointer(h.spans)
	sp.len = int(spans_size / sys.PtrSize)
	sp.cap = int(spans_size / sys.PtrSize)
}

// mapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the spans array
// immediately after observing the change to arena_used.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, sys.PhysPageSize)
	if h.spans_mapped >= n {
		return
	}
	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
	h.spans_mapped = n
}
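
// Worked example for mapSpans above (illustrative, assuming 8KB pages and
// 8-byte pointers): growing arena_used by 64MB means 64MB/8KB == 8192 new
// pages, so 8192*8 == 64KB of additional h_spans entries are mapped
// (rounded up to the physical page size) before arena_used is published.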

// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s)
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		h.reclaim(npage)
	}

	// transfer stats from cache to global
	memstats.heap_live += uint64(_g_.m.mcache.local_cachealloc)
	_g_.m.mcache.local_cachealloc = 0
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		s.state = _MSpanInUse
		s.freelist = 0
		s.ref = 0
		s.sizeclass = uint8(sizeclass)
		if sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			memstats.heap_live += uint64(npage << _PageShift)
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.free)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h_spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h_spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h_spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h_spans orders the reads.
	unlock(&h.lock)
	return s
}

func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, sizeclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}
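
// Illustrative only: how a component such as mcentral might request a
// fresh span for size class c via alloc above. class_to_allocnpages is
// assumed to be the runtime's table of pages per span for each class.
func exampleGrowCentral(c int32) *mspan {
	npages := uintptr(class_to_allocnpages[c])
	return mheap_.alloc(npages, c, false, true)
}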

func (h *mheap) allocStack(npage uintptr) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_allocstack not on g0 stack")
	}
	lock(&h.lock)
	s := h.allocSpanLocked(npage)
	if s != nil {
		s.state = _MSpanStack
		s.freelist = 0
		s.ref = 0
		memstats.stacks_inuse += uint64(s.npages << _PageShift)
	}
	// This unlock acts as a release barrier. See mheap.alloc_m.
	unlock(&h.lock)
	return s
}

// Allocates a span of the given size.  h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			goto HaveSpan
		}
	}

	// Best fit in list of large spans.
	list = &h.freelarge
	s = h.allocLarge(npage)
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	list.remove(s)
	if s.inList() {
		throw("still in list")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.start+pageID(npage), s.npages-npage)
		s.npages = npage
		p := uintptr(t.start)
		p -= (h.arena_start >> _PageShift)
		if p > 0 {
			h_spans[p-1] = s
		}
		h_spans[p] = t
		h_spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanStack // prevent coalescing with s
		t.state = _MSpanStack
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for n := uintptr(0); n < npage; n++ {
		h_spans[p+n] = s
	}

	memstats.heap_inuse += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}
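
// Worked example for the trim above (illustrative): carving 2 pages out
// of a free 5-page span leaves a 3-page tail t. h_spans is updated so the
// boundary pages map to the right spans, both halves are temporarily
// marked _MSpanStack so freeSpanLocked cannot coalesce them back
// together, and t lands in h.free[3].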

// Allocate a span of exactly npage pages from the list of large spans.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	return bestFit(&h.freelarge, npage, nil)
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
	for s := list.first; s != nil; s = s.next {
		if s.npages < npage {
			continue
		}
		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
			best = s
		}
	}
	return best
}
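
// Worked example for bestFit above (illustrative): with free spans of 3,
// 5, 8, and 5 pages in address order and npage == 4, the 3-page span is
// skipped, and of the two 5-page candidates the one with the lower start
// address is returned.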

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for i := p; i < p+s.npages; i++ {
		h_spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(npage)
	h.freeSpanLocked(s, false, true, 0)
	return true
}
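
// Worked example for grow above (illustrative, assuming 8KB pages and a
// 1MB _HeapAllocChunk): a 3-page request is rounded up to 8 pages (64KB)
// and then raised to the chunk minimum, so a single grow call typically
// maps far more than was asked for.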

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h_spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans.  Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	p := uintptr(v) >> _PageShift
	q := p
	q -= h.arena_start >> _PageShift
	s := h_spans[q]
	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_live += uint64(mp.mcache.local_cachealloc)
		mp.mcache.local_cachealloc = 0
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		if trace.enabled {
			traceHeapAlloc()
		}
		unlock(&h.lock)
	})
}

func (h *mheap) freeStack(s *mspan) {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_freestack not on g0 stack")
	}
	s.needzero = 1
	lock(&h.lock)
	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, true, true, 0)
	unlock(&h.lock)
}

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanStack:
		if s.ref != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.ref != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := uintptr(s.start)
	p -= h.arena_start >> _PageShift
	if p > 0 {
		t := h_spans[p-1]
		if t != nil && t.state == _MSpanFree {
			s.start = t.start
			s.npages += t.npages
			s.npreleased = t.npreleased // absorb released pages
			s.needzero |= t.needzero
			p -= t.npages
			h_spans[p] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}
	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
		t := h_spans[p+s.npages]
		if t != nil && t.state == _MSpanFree {
			s.npages += t.npages
			s.npreleased += t.npreleased
			s.needzero |= t.needzero
			h_spans[p+s.npages-1] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}

	// Insert s into appropriate list.
	h.freeList(s.npages).insert(s)
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages]
	}
	return &h.freelarge
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}

func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if sys.PhysPageSize > _PageSize {
		// golang.org/issue/9993
		// If the physical page size of the machine is larger than
		// our logical heap page size the kernel may round up the
		// amount to be freed to its page size and corrupt the heap
		// pages surrounding the unused block.
		return 0
	}

	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
			released := (s.npages - s.npreleased) << _PageShift
			memstats.heap_released += uint64(released)
			sumreleased += released
			s.npreleased = s.npages
			sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
		}
	}
	return sumreleased
}
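
// Worked example for scavengelist above (illustrative, assuming 8KB
// pages): a 16-page span idle past the limit with 4 pages already
// released returns the remaining 12 pages (96KB) via sysUnused and adds
// them to memstats.heap_released.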

func (h *mheap) scavenge(k int32, now, limit uint64) {
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengelist(&h.freelarge, now, limit)
	unlock(&h.lock)

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
		// But we can't call ReadMemStats on g0 holding locks.
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gcStart(gcForceBlockMode, false)
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(start pageID, npages uintptr) {
	span.next = nil
	span.prev = nil
	span.list = nil
	span.start = start
	span.npages = npages
	span.freelist = 0
	span.ref = 0
	span.sizeclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
}

func (span *mspan) inList() bool {
	return span.prev != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = &list.first
}

func (list *mSpanList) remove(span *mspan) {
	if span.prev == nil || span.list != list {
		println("failed MSpanList_Remove", span, span.prev, span.list, list)
		throw("MSpanList_Remove")
	}
	if span.next != nil {
		span.next.prev = span.prev
	} else {
		// TODO: After we remove the span.list != list check above,
		// we could at least still check list.last == &span.next here.
		list.last = span.prev
	}
	*span.prev = span.next
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		list.first.prev = &span.next
	} else {
		list.last = &span.next
	}
	list.first = span
	span.prev = &list.first
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.next = nil
	span.prev = list.last
	*list.last = span
	list.last = &span.next
	span.list = list
}
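
// Illustrative only: the last == &first sentinel above means appending to
// and walking a list need no empty-list special cases.
func exampleSumPages(list *mSpanList) (n uintptr) {
	for s := list.first; s != nil; s = s.next {
		n += s.npages
	}
	return n
}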

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p.  All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
//  already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - uintptr(span.start<<_PageShift)
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - uintptr(span.start<<_PageShift)

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
type specialfinalizer struct {
	special special
	fn      *funcval
	nret    uintptr
	fint    *_type
	ot      *ptrtype
}

// Adds a finalizer to the object p.  Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// Do whatever cleanup needs to be done to deallocate s.  It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}