github.com/hbdrawn/golang@v0.0.0-20141214014649-6b835209aba2/src/runtime/mheap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Page heap.
     6  //
     7  // See malloc.h for overview.
     8  //
     9  // When a MSpan is in the heap free list, state == MSpanFree
    10  // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
    11  //
    12  // When a MSpan is allocated, state == MSpanInUse or MSpanStack
    13  // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
    14  
    15  package runtime
    16  
    17  import "unsafe"
    18  
    19  var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
    20  var h_spans []*mspan    // TODO: make this h.spans once mheap can be defined in Go
    21  
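        // recordspan is registered with h.spanalloc in mHeap_Init and is invoked
        // by fixalloc the first time it hands out a freshly carved mspan.  It
        // appends the span to h_allspans, growing the backing array with sysAlloc
        // directly so that recording a span never goes through the regular heap
        // allocator.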
    22  func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
    23  	h := (*mheap)(vh)
    24  	s := (*mspan)(p)
    25  	if len(h_allspans) >= cap(h_allspans) {
    26  		n := 64 * 1024 / ptrSize
    27  		if n < cap(h_allspans)*3/2 {
    28  			n = cap(h_allspans) * 3 / 2
    29  		}
    30  		var new []*mspan
    31  		sp := (*slice)(unsafe.Pointer(&new))
    32  		sp.array = (*byte)(sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys))
    33  		if sp.array == nil {
    34  			gothrow("runtime: cannot allocate memory")
    35  		}
    36  		sp.len = uint(len(h_allspans))
    37  		sp.cap = uint(n)
    38  		if len(h_allspans) > 0 {
    39  			copy(new, h_allspans)
    40  			// Don't free the old array if it's referenced by sweep.
    41  			// See the comment in mgc0.c.
    42  			if h.allspans != mheap_.gcspans {
    43  				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*ptrSize, &memstats.other_sys)
    44  			}
    45  		}
    46  		h_allspans = new
    47  		h.allspans = (**mspan)(unsafe.Pointer(sp.array))
    48  	}
    49  	h_allspans = append(h_allspans, s)
    50  	h.nspan = uint32(len(h_allspans))
    51  }
    52  
    53  // Initialize the heap.
    54  func mHeap_Init(h *mheap, spans_size uintptr) {
    55  	fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
    56  	fixAlloc_Init(&h.cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
    57  	fixAlloc_Init(&h.specialfinalizeralloc, unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
    58  	fixAlloc_Init(&h.specialprofilealloc, unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
    59  
    60  	// h->mapcache needs no init
    61  	for i := range h.free {
    62  		mSpanList_Init(&h.free[i])
    63  		mSpanList_Init(&h.busy[i])
    64  	}
    65  
    66  	mSpanList_Init(&h.freelarge)
    67  	mSpanList_Init(&h.busylarge)
    68  	for i := range h.central {
    69  		mCentral_Init(&h.central[i].mcentral, int32(i))
    70  	}
    71  
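        	// Alias the Go-visible h_spans slice onto the reserved h.spans array so
        	// span lookups can use ordinary slice indexing.  The backing memory is
        	// committed incrementally by mHeap_MapSpans as the arena grows.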
    72  	sp := (*slice)(unsafe.Pointer(&h_spans))
    73  	sp.array = (*byte)(unsafe.Pointer(h.spans))
    74  	sp.len = uint(spans_size / ptrSize)
    75  	sp.cap = uint(spans_size / ptrSize)
    76  }
    77  
    78  func mHeap_MapSpans(h *mheap) {
    79  	// Map spans array, PageSize at a time.
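        	// One *mspan pointer is kept per arena page, so the prefix of the spans
        	// array that must be mapped is (arena_used-arena_start)/_PageSize
        	// pointers, rounded up to a physical page.  Only the not-yet-mapped tail
        	// beyond h.spans_mapped is passed to sysMap.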
    80  	n := uintptr(unsafe.Pointer(h.arena_used))
    81  	n -= uintptr(unsafe.Pointer(h.arena_start))
    82  	n = n / _PageSize * ptrSize
    83  	n = round(n, _PhysPageSize)
    84  	if h.spans_mapped >= n {
    85  		return
    86  	}
    87  	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
    88  	h.spans_mapped = n
    89  }
    90  
    91  // Sweeps spans in list until it reclaims at least npages into the heap.
    92  // Returns the actual number of pages reclaimed.
    93  func mHeap_ReclaimList(h *mheap, list *mspan, npages uintptr) uintptr {
    94  	n := uintptr(0)
    95  	sg := mheap_.sweepgen
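        	// Sweep state is encoded relative to the heap's sweep generation:
        	// sweepgen == sg-2 means the span is unswept, sg-1 means it is being
        	// swept, and sg means it has been swept this cycle.  The cas below
        	// claims an unswept span for sweeping by this goroutine.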
    96  retry:
    97  	for s := list.next; s != list; s = s.next {
    98  		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
    99  			mSpanList_Remove(s)
   100  			// swept spans are at the end of the list
   101  			mSpanList_InsertBack(list, s)
   102  			unlock(&h.lock)
   103  			if mSpan_Sweep(s, false) {
   104  				// TODO(rsc,dvyukov): This is probably wrong.
   105  				// It is undercounting the number of pages reclaimed.
   106  				// See golang.org/issue/9048.
   107  				// Note that if we want to add the true count of s's pages,
   108  				// we must record that before calling mSpan_Sweep,
   109  				// because if mSpan_Sweep returns true the span has
   110  				// been freed and its pages may already be reused.
   111  				n++
   112  			}
   113  			lock(&h.lock)
   114  			if n >= npages {
   115  				return n
   116  			}
   117  			// the span could have been moved elsewhere
   118  			goto retry
   119  		}
   120  		if s.sweepgen == sg-1 {
   121  			// the span is being swept by the background sweeper, skip it
   122  			continue
   123  		}
   124  		// already swept empty span,
   125  		// all subsequent ones must also be either swept or in the process of sweeping
   126  		break
   127  	}
   128  	return n
   129  }
   130  
   131  // Sweeps and reclaims at least npage pages into the heap.
   132  // Called before allocating npage pages.
   133  func mHeap_Reclaim(h *mheap, npage uintptr) {
   134  	// First try to sweep busy spans with large objects of size >= npage;
   135  	// this has a good chance of reclaiming the necessary space.
   136  	for i := int(npage); i < len(h.busy); i++ {
   137  		if mHeap_ReclaimList(h, &h.busy[i], npage) != 0 {
   138  			return // Bingo!
   139  		}
   140  	}
   141  
   142  	// Then -- even larger objects.
   143  	if mHeap_ReclaimList(h, &h.busylarge, npage) != 0 {
   144  		return // Bingo!
   145  	}
   146  
   147  	// Now try smaller objects.
   148  	// One such object is not enough, so we need to reclaim several of them.
   149  	reclaimed := uintptr(0)
   150  	for i := 0; i < int(npage) && i < len(h.busy); i++ {
   151  		reclaimed += mHeap_ReclaimList(h, &h.busy[i], npage-reclaimed)
   152  		if reclaimed >= npage {
   153  			return
   154  		}
   155  	}
   156  
   157  	// Now sweep everything that is not yet swept.
   158  	unlock(&h.lock)
   159  	for {
   160  		n := sweepone()
   161  		if n == ^uintptr(0) { // all spans are swept
   162  			break
   163  		}
   164  		reclaimed += n
   165  		if reclaimed >= npage {
   166  			break
   167  		}
   168  	}
   169  	lock(&h.lock)
   170  }
   171  
   172  // Allocate a new span of npage pages from the heap for GC'd memory
   173  // and record its size class in the HeapMap and HeapMapCache.
   174  func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
   175  	_g_ := getg()
   176  	if _g_ != _g_.m.g0 {
   177  		gothrow("_mheap_alloc not on g0 stack")
   178  	}
   179  	lock(&h.lock)
   180  
   181  	// To prevent excessive heap growth, before allocating n pages
   182  	// we need to sweep and reclaim at least n pages.
   183  	if h.sweepdone == 0 {
   184  		mHeap_Reclaim(h, npage)
   185  	}
   186  
   187  	// transfer stats from cache to global
   188  	memstats.heap_alloc += uint64(_g_.m.mcache.local_cachealloc)
   189  	_g_.m.mcache.local_cachealloc = 0
   190  	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
   191  	_g_.m.mcache.local_tinyallocs = 0
   192  
   193  	s := mHeap_AllocSpanLocked(h, npage)
   194  	if s != nil {
   195  		// Record span info, because gc needs to be
   196  		// able to map interior pointer to containing span.
   197  		atomicstore(&s.sweepgen, h.sweepgen)
   198  		s.state = _MSpanInUse
   199  		s.freelist = 0
   200  		s.ref = 0
   201  		s.sizeclass = uint8(sizeclass)
   202  		if sizeclass == 0 {
   203  			s.elemsize = s.npages << _PageShift
   204  		} else {
   205  			s.elemsize = uintptr(class_to_size[sizeclass])
   206  		}
   207  
   208  		// update stats, sweep lists
   209  		if large {
   210  			memstats.heap_objects++
   211  			memstats.heap_alloc += uint64(npage << _PageShift)
   212  			// Swept spans are at the end of lists.
   213  			if s.npages < uintptr(len(h.free)) {
   214  				mSpanList_InsertBack(&h.busy[s.npages], s)
   215  			} else {
   216  				mSpanList_InsertBack(&h.busylarge, s)
   217  			}
   218  		}
   219  	}
   220  	unlock(&h.lock)
   221  	return s
   222  }
   223  
   224  func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
   225  	// Don't do any operations that lock the heap on the G stack.
   226  	// It might trigger stack growth, and the stack growth code needs
   227  	// to be able to allocate heap.
   228  	var s *mspan
   229  	systemstack(func() {
   230  		s = mHeap_Alloc_m(h, npage, sizeclass, large)
   231  	})
   232  
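        	// Zero the span only if the caller asked for zeroed memory and the span
        	// may hold stale data; needzero is set when a span is freed for reuse,
        	// for example by mHeap_FreeStack.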
   233  	if s != nil {
   234  		if needzero && s.needzero != 0 {
   235  			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
   236  		}
   237  		s.needzero = 0
   238  	}
   239  	return s
   240  }
   241  
   242  func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
   243  	_g_ := getg()
   244  	if _g_ != _g_.m.g0 {
   245  		gothrow("mheap_allocstack not on g0 stack")
   246  	}
   247  	lock(&h.lock)
   248  	s := mHeap_AllocSpanLocked(h, npage)
   249  	if s != nil {
   250  		s.state = _MSpanStack
   251  		s.freelist = 0
   252  		s.ref = 0
   253  		memstats.stacks_inuse += uint64(s.npages << _PageShift)
   254  	}
   255  	unlock(&h.lock)
   256  	return s
   257  }
   258  
   259  // Allocates a span of the given size.  h must be locked.
   260  // The returned span has been removed from the
   261  // free list, but its state is still MSpanFree.
   262  func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
   263  	var s *mspan
   264  
   265  	// Try in fixed-size lists up to max.
   266  	for i := int(npage); i < len(h.free); i++ {
   267  		if !mSpanList_IsEmpty(&h.free[i]) {
   268  			s = h.free[i].next
   269  			goto HaveSpan
   270  		}
   271  	}
   272  
   273  	// Best fit in list of large spans.
   274  	s = mHeap_AllocLarge(h, npage)
   275  	if s == nil {
   276  		if !mHeap_Grow(h, npage) {
   277  			return nil
   278  		}
   279  		s = mHeap_AllocLarge(h, npage)
   280  		if s == nil {
   281  			return nil
   282  		}
   283  	}
   284  
   285  HaveSpan:
   286  	// Mark span in use.
   287  	if s.state != _MSpanFree {
   288  		gothrow("MHeap_AllocLocked - MSpan not free")
   289  	}
   290  	if s.npages < npage {
   291  		gothrow("MHeap_AllocLocked - bad npages")
   292  	}
   293  	mSpanList_Remove(s)
   294  	if s.next != nil || s.prev != nil {
   295  		gothrow("still in list")
   296  	}
   297  	if s.npreleased > 0 {
   298  		sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
   299  		memstats.heap_released -= uint64(s.npreleased << _PageShift)
   300  		s.npreleased = 0
   301  	}
   302  
   303  	if s.npages > npage {
   304  		// Trim extra and put it back in the heap.
   305  		t := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
   306  		mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
   307  		s.npages = npage
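        		// t.start is a page ID (address >> _PageShift), so subtracting the
        		// arena start's page ID gives t's index into h_spans.  The entries
        		// updated below keep boundary lookups consistent: the last page of the
        		// shrunken s still maps to s, and both ends of t map to t.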
   308  		p := uintptr(t.start)
   309  		p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   310  		if p > 0 {
   311  			h_spans[p-1] = s
   312  		}
   313  		h_spans[p] = t
   314  		h_spans[p+t.npages-1] = t
   315  		t.needzero = s.needzero
   316  		s.state = _MSpanStack // prevent coalescing with s
   317  		t.state = _MSpanStack
   318  		mHeap_FreeSpanLocked(h, t, false, false)
   319  		t.unusedsince = s.unusedsince // preserve age (TODO: wrong: t is possibly merged and/or deallocated at this point)
   320  		s.state = _MSpanFree
   321  	}
   322  	s.unusedsince = 0
   323  
   324  	p := uintptr(s.start)
   325  	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   326  	for n := uintptr(0); n < npage; n++ {
   327  		h_spans[p+n] = s
   328  	}
   329  
   330  	memstats.heap_inuse += uint64(npage << _PageShift)
   331  	memstats.heap_idle -= uint64(npage << _PageShift)
   332  
   333  	//println("spanalloc", hex(s.start<<_PageShift))
   334  	if s.next != nil || s.prev != nil {
   335  		gothrow("still in list")
   336  	}
   337  	return s
   338  }
   339  
   340  // Allocate a span of exactly npage pages from the list of large spans.
   341  func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
   342  	return bestFit(&h.freelarge, npage, nil)
   343  }
   344  
   345  // Search list for smallest span with >= npage pages.
   346  // If there are multiple smallest spans, take the one
   347  // with the earliest starting address.
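        // For example, with free spans of 8, 5 and 5 pages and npage == 4, the
        // 5-page span with the lower start address is returned.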
   348  func bestFit(list *mspan, npage uintptr, best *mspan) *mspan {
   349  	for s := list.next; s != list; s = s.next {
   350  		if s.npages < npage {
   351  			continue
   352  		}
   353  		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
   354  			best = s
   355  		}
   356  	}
   357  	return best
   358  }
   359  
   360  // Try to add at least npage pages of memory to the heap,
   361  // returning whether it worked.
   362  func mHeap_Grow(h *mheap, npage uintptr) bool {
   363  	// Ask for a big chunk, to reduce the number of mappings
   364  	// the operating system needs to track; also amortizes
   365  	// the overhead of an operating system mapping.
   366  	// Allocate a multiple of 64kB.
   367  	npage = round(npage, (64<<10)/_PageSize)
   368  	ask := npage << _PageShift
   369  	if ask < _HeapAllocChunk {
   370  		ask = _HeapAllocChunk
   371  	}
   372  
   373  	v := mHeap_SysAlloc(h, ask)
   374  	if v == nil {
   375  		if ask > npage<<_PageShift {
   376  			ask = npage << _PageShift
   377  			v = mHeap_SysAlloc(h, ask)
   378  		}
   379  		if v == nil {
   380  			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
   381  			return false
   382  		}
   383  	}
   384  
   385  	// Create a fake "in use" span and free it, so that the
   386  	// right coalescing happens.
   387  	s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
   388  	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
   389  	p := uintptr(s.start)
   390  	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
   391  	h_spans[p] = s
   392  	h_spans[p+s.npages-1] = s
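        	// Mark the span as already swept for the current cycle and nominally in
        	// use, so the state checks in mHeap_FreeSpanLocked pass and the sweeper
        	// ignores it.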
   393  	atomicstore(&s.sweepgen, h.sweepgen)
   394  	s.state = _MSpanInUse
   395  	mHeap_FreeSpanLocked(h, s, false, true)
   396  	return true
   397  }
   398  
   399  // Look up the span at the given address.
   400  // Address is guaranteed to be in the map
   401  // and is guaranteed to be the start or end of a span.
   402  func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
   403  	p := uintptr(v)
   404  	p -= uintptr(unsafe.Pointer(h.arena_start))
   405  	return h_spans[p>>_PageShift]
   406  }
   407  
   408  // Look up the span at the given address.
   409  // Address is *not* guaranteed to be in map
   410  // and may be anywhere in the span.
   411  // Map entries for the middle of a span are only
   412  // valid for allocated spans.  Free spans may have
   413  // other garbage in their middles, so we have to
   414  // check for that.
   415  func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
   416  	if uintptr(v) < uintptr(unsafe.Pointer(h.arena_start)) || uintptr(v) >= uintptr(unsafe.Pointer(h.arena_used)) {
   417  		return nil
   418  	}
   419  	p := uintptr(v) >> _PageShift
   420  	q := p
   421  	q -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
   422  	s := h_spans[q]
   423  	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
   424  		return nil
   425  	}
   426  	return s
   427  }
   428  
   429  // Free the span back into the heap.
   430  func mHeap_Free(h *mheap, s *mspan, acct int32) {
   431  	systemstack(func() {
   432  		mp := getg().m
   433  		lock(&h.lock)
   434  		memstats.heap_alloc += uint64(mp.mcache.local_cachealloc)
   435  		mp.mcache.local_cachealloc = 0
   436  		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
   437  		mp.mcache.local_tinyallocs = 0
   438  		if acct != 0 {
   439  			memstats.heap_alloc -= uint64(s.npages << _PageShift)
   440  			memstats.heap_objects--
   441  		}
   442  		mHeap_FreeSpanLocked(h, s, true, true)
   443  		unlock(&h.lock)
   444  	})
   445  }
   446  
   447  func mHeap_FreeStack(h *mheap, s *mspan) {
   448  	_g_ := getg()
   449  	if _g_ != _g_.m.g0 {
   450  		gothrow("mheap_freestack not on g0 stack")
   451  	}
   452  	s.needzero = 1
   453  	lock(&h.lock)
   454  	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
   455  	mHeap_FreeSpanLocked(h, s, true, true)
   456  	unlock(&h.lock)
   457  }
   458  
   459  func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool) {
   460  	switch s.state {
   461  	case _MSpanStack:
   462  		if s.ref != 0 {
   463  			gothrow("MHeap_FreeSpanLocked - invalid stack free")
   464  		}
   465  	case _MSpanInUse:
   466  		if s.ref != 0 || s.sweepgen != h.sweepgen {
   467  			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
   468  			gothrow("MHeap_FreeSpanLocked - invalid free")
   469  		}
   470  	default:
   471  		gothrow("MHeap_FreeSpanLocked - invalid span state")
   472  	}
   473  
   474  	if acctinuse {
   475  		memstats.heap_inuse -= uint64(s.npages << _PageShift)
   476  	}
   477  	if acctidle {
   478  		memstats.heap_idle += uint64(s.npages << _PageShift)
   479  	}
   480  	s.state = _MSpanFree
   481  	mSpanList_Remove(s)
   482  
   483  	// Stamp newly unused spans. The scavenger will use that
   484  	// info to potentially give back some pages to the OS.
   485  	s.unusedsince = nanotime()
   486  	s.npreleased = 0
   487  
   488  	// Coalesce with earlier, later spans.
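        	// p is s's first-page index into h_spans.  A neighboring span that is
        	// neither in use nor backing stacks is absorbed into s: its pages,
        	// released-page count and needzero flag are folded in, the boundary
        	// h_spans entry is redirected to s, and the neighbor's mspan is returned
        	// to the span fixalloc.  The (p+s.npages)*ptrSize < h.spans_mapped test
        	// keeps the forward probe inside the mapped part of the spans array.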
   489  	p := uintptr(s.start)
   490  	p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
   491  	if p > 0 {
   492  		t := h_spans[p-1]
   493  		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
   494  			s.start = t.start
   495  			s.npages += t.npages
   496  			s.npreleased = t.npreleased // absorb released pages
   497  			s.needzero |= t.needzero
   498  			p -= t.npages
   499  			h_spans[p] = s
   500  			mSpanList_Remove(t)
   501  			t.state = _MSpanDead
   502  			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
   503  		}
   504  	}
   505  	if (p+s.npages)*ptrSize < h.spans_mapped {
   506  		t := h_spans[p+s.npages]
   507  		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
   508  			s.npages += t.npages
   509  			s.npreleased += t.npreleased
   510  			s.needzero |= t.needzero
   511  			h_spans[p+s.npages-1] = s
   512  			mSpanList_Remove(t)
   513  			t.state = _MSpanDead
   514  			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
   515  		}
   516  	}
   517  
   518  	// Insert s into appropriate list.
   519  	if s.npages < uintptr(len(h.free)) {
   520  		mSpanList_Insert(&h.free[s.npages], s)
   521  	} else {
   522  		mSpanList_Insert(&h.freelarge, s)
   523  	}
   524  }
   525  
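        // scavengelist returns the memory of spans that have sat unused for longer
        // than limit nanoseconds to the operating system via sysUnused.  npreleased
        // tracks how many of a span's pages are already released, so pages are not
        // counted twice and can be re-marked used (sysUsed) if the span is
        // allocated again.  The return value is the number of bytes released.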
   526  func scavengelist(list *mspan, now, limit uint64) uintptr {
   527  	if mSpanList_IsEmpty(list) {
   528  		return 0
   529  	}
   530  
   531  	var sumreleased uintptr
   532  	for s := list.next; s != list; s = s.next {
   533  		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
   534  			released := (s.npages - s.npreleased) << _PageShift
   535  			memstats.heap_released += uint64(released)
   536  			sumreleased += released
   537  			s.npreleased = s.npages
   538  			sysUnused((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
   539  		}
   540  	}
   541  	return sumreleased
   542  }
   543  
   544  func mHeap_Scavenge(k int32, now, limit uint64) {
   545  	h := &mheap_
   546  	lock(&h.lock)
   547  	var sumreleased uintptr
   548  	for i := 0; i < len(h.free); i++ {
   549  		sumreleased += scavengelist(&h.free[i], now, limit)
   550  	}
   551  	sumreleased += scavengelist(&h.freelarge, now, limit)
   552  	unlock(&h.lock)
   553  
   554  	if debug.gctrace > 0 {
   555  		if sumreleased > 0 {
   556  			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
   557  		}
   558  		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
   559  		// But we can't call ReadMemStats on g0 holding locks.
   560  		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
   561  	}
   562  }
   563  
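        // scavenge_m forces an immediate scavenge: with now == ^uint64(0) and
        // limit == 0, every free span that still has unreleased pages qualifies,
        // so all idle heap memory is returned to the OS.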
   564  func scavenge_m() {
   565  	mHeap_Scavenge(-1, ^uint64(0), 0)
   566  }
   567  
   568  // Initialize a new span with the given start and npages.
   569  func mSpan_Init(span *mspan, start pageID, npages uintptr) {
   570  	span.next = nil
   571  	span.prev = nil
   572  	span.start = start
   573  	span.npages = npages
   574  	span.freelist = 0
   575  	span.ref = 0
   576  	span.sizeclass = 0
   577  	span.incache = false
   578  	span.elemsize = 0
   579  	span.state = _MSpanDead
   580  	span.unusedsince = 0
   581  	span.npreleased = 0
   582  	span.speciallock.key = 0
   583  	span.specials = nil
   584  	span.needzero = 0
   585  }
   586  
   587  // Initialize an empty doubly-linked list.
   588  func mSpanList_Init(list *mspan) {
   589  	list.state = _MSpanListHead
   590  	list.next = list
   591  	list.prev = list
   592  }
   593  
   594  func mSpanList_Remove(span *mspan) {
   595  	if span.prev == nil && span.next == nil {
   596  		return
   597  	}
   598  	span.prev.next = span.next
   599  	span.next.prev = span.prev
   600  	span.prev = nil
   601  	span.next = nil
   602  }
   603  
   604  func mSpanList_IsEmpty(list *mspan) bool {
   605  	return list.next == list
   606  }
   607  
   608  func mSpanList_Insert(list *mspan, span *mspan) {
   609  	if span.next != nil || span.prev != nil {
   610  		println("failed MSpanList_Insert", span, span.next, span.prev)
   611  		gothrow("MSpanList_Insert")
   612  	}
   613  	span.next = list.next
   614  	span.prev = list
   615  	span.next.prev = span
   616  	span.prev.next = span
   617  }
   618  
   619  func mSpanList_InsertBack(list *mspan, span *mspan) {
   620  	if span.next != nil || span.prev != nil {
   621  		println("failed MSpanList_InsertBack", span, span.next, span.prev)
   622  		gothrow("MSpanList_InsertBack")
   623  	}
   624  	span.next = list
   625  	span.prev = list.prev
   626  	span.next.prev = span
   627  	span.prev.next = span
   628  }
   629  
   630  // Adds the special record s to the list of special records for
   631  // the object p.  All fields of s should be filled in except for
   632  // offset & next, which this routine will fill in.
   633  // Returns true if the special was successfully added, false otherwise.
   634  // (The add will fail only if a record with the same p and s->kind
   635  //  already exists.)
   636  func addspecial(p unsafe.Pointer, s *special) bool {
   637  	span := mHeap_LookupMaybe(&mheap_, p)
   638  	if span == nil {
   639  		gothrow("addspecial on invalid pointer")
   640  	}
   641  
   642  	// Ensure that the span is swept.
   643  	// GC accesses specials list w/o locks. And it's just much safer.
   644  	mp := acquirem()
   645  	mSpan_EnsureSwept(span)
   646  
   647  	offset := uintptr(p) - uintptr(span.start<<_PageShift)
   648  	kind := s.kind
   649  
   650  	lock(&span.speciallock)
   651  
   652  	// Find splice point, check for existing record.
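        	// The specials list is kept sorted by offset within the span and then by
        	// kind, so the walk can stop at the first entry that sorts after the new
        	// record.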
   653  	t := &span.specials
   654  	for {
   655  		x := *t
   656  		if x == nil {
   657  			break
   658  		}
   659  		if offset == uintptr(x.offset) && kind == x.kind {
   660  			unlock(&span.speciallock)
   661  			releasem(mp)
   662  			return false // already exists
   663  		}
   664  		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
   665  			break
   666  		}
   667  		t = &x.next
   668  	}
   669  
   670  	// Splice in record, fill in offset.
   671  	s.offset = uint16(offset)
   672  	s.next = *t
   673  	*t = s
   674  	unlock(&span.speciallock)
   675  	releasem(mp)
   676  
   677  	return true
   678  }
   679  
   680  // Removes the Special record of the given kind for the object p.
   681  // Returns the record if the record existed, nil otherwise.
   682  // The caller must FixAlloc_Free the result.
   683  func removespecial(p unsafe.Pointer, kind uint8) *special {
   684  	span := mHeap_LookupMaybe(&mheap_, p)
   685  	if span == nil {
   686  		gothrow("removespecial on invalid pointer")
   687  	}
   688  
   689  	// Ensure that the span is swept.
   690  	// GC accesses specials list w/o locks. And it's just much safer.
   691  	mp := acquirem()
   692  	mSpan_EnsureSwept(span)
   693  
   694  	offset := uintptr(p) - uintptr(span.start<<_PageShift)
   695  
   696  	lock(&span.speciallock)
   697  	t := &span.specials
   698  	for {
   699  		s := *t
   700  		if s == nil {
   701  			break
   702  		}
   703  		// This function is used for finalizers only, so we don't check for
   704  		// "interior" specials (p must be exactly equal to s->offset).
   705  		if offset == uintptr(s.offset) && kind == s.kind {
   706  			*t = s.next
   707  			unlock(&span.speciallock)
   708  			releasem(mp)
   709  			return s
   710  		}
   711  		t = &s.next
   712  	}
   713  	unlock(&span.speciallock)
   714  	releasem(mp)
   715  	return nil
   716  }
   717  
   718  // Adds a finalizer to the object p.  Returns true if it succeeded.
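        // addfinalizer is reached from runtime.SetFinalizer: it allocates a
        // specialfinalizer record and attaches it to p's span via addspecial.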
   719  func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
   720  	lock(&mheap_.speciallock)
   721  	s := (*specialfinalizer)(fixAlloc_Alloc(&mheap_.specialfinalizeralloc))
   722  	unlock(&mheap_.speciallock)
   723  	s.special.kind = _KindSpecialFinalizer
   724  	s.fn = f
   725  	s.nret = nret
   726  	s.fint = fint
   727  	s.ot = ot
   728  	if addspecial(p, &s.special) {
   729  		return true
   730  	}
   731  
   732  	// There was an old finalizer
   733  	lock(&mheap_.speciallock)
   734  	fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
   735  	unlock(&mheap_.speciallock)
   736  	return false
   737  }
   738  
   739  // Removes the finalizer (if any) from the object p.
   740  func removefinalizer(p unsafe.Pointer) {
   741  	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
   742  	if s == nil {
   743  		return // there wasn't a finalizer to remove
   744  	}
   745  	lock(&mheap_.speciallock)
   746  	fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
   747  	unlock(&mheap_.speciallock)
   748  }
   749  
   750  // Set the heap profile bucket associated with addr to b.
   751  func setprofilebucket(p unsafe.Pointer, b *bucket) {
   752  	lock(&mheap_.speciallock)
   753  	s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
   754  	unlock(&mheap_.speciallock)
   755  	s.special.kind = _KindSpecialProfile
   756  	s.b = b
   757  	if !addspecial(p, &s.special) {
   758  		gothrow("setprofilebucket: profile already set")
   759  	}
   760  }
   761  
   762  // Do whatever cleanup needs to be done to deallocate s.  It has
   763  // already been unlinked from the MSpan specials list.
   764  // Returns true if we should keep working on deallocating p.
   765  func freespecial(s *special, p unsafe.Pointer, size uintptr, freed bool) bool {
   766  	switch s.kind {
   767  	case _KindSpecialFinalizer:
   768  		sf := (*specialfinalizer)(unsafe.Pointer(s))
   769  		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
   770  		lock(&mheap_.speciallock)
   771  		fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(sf))
   772  		unlock(&mheap_.speciallock)
   773  		return false // don't free p until finalizer is done
   774  	case _KindSpecialProfile:
   775  		sp := (*specialprofile)(unsafe.Pointer(s))
   776  		mProf_Free(sp.b, size, freed)
   777  		lock(&mheap_.speciallock)
   778  		fixAlloc_Free(&mheap_.specialprofilealloc, (unsafe.Pointer)(sp))
   779  		unlock(&mheap_.speciallock)
   780  		return true
   781  	default:
   782  		gothrow("bad special kind")
   783  		panic("not reached")
   784  	}
   785  }