github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/malloc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan
//	   is now "idle", so it is returned to the mheap and no longer
//	   has a size class.
//	   This may coalesce it with adjacent idle mspans.
//
//	4. If an mspan remains idle for long enough, return its pages
//	   to the operating system.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// Free object slots in an mspan are zeroed only if mspan.needzero is
// false. If needzero is true, objects are zeroed as they are
// allocated. There are various benefits to delaying zeroing this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
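//
// As a concrete example (illustrative): a 13-byte allocation of a
// pointer-containing type is rounded up to the 16-byte size class and
// served from this P's mcache without locking, while the same 13 bytes
// in a pointer-free type would instead go through the tiny allocator
// (see mallocgc below).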

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
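	// On a 64-bit system ^uintptr(0)>>63 is 1, so this is 1<<1/2 = 1;
	// on a 32-bit system the shift yields 0, so it is 1<<0/2 = 0.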
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
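	// For example: on linux/amd64 this is 4 - 8/4*0 - 1*0 = 4, on
	// windows/amd64 it is 4 - 8/4*1 - 0 = 2, and on plan9 it is
	// 4 - 0 - 1 = 3, matching the table above.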

	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address space.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift
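	// For example: on linux/amd64, _MHeapMap_TotalBits evaluates to 39
	// (a 512 GB arena) and _MHeapMap_Bits to 39 - 13 = 26, so the spans
	// table covers 1<<26 8 KB pages.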

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	_MaxArena32 = 1<<32 - 1
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
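//
// A typical lifecycle, as used by mallocinit and sysAlloc below (an
// illustrative sketch, not a prescribed sequence; hint and size are
// hypothetical):
//
//	var reserved bool
//	p := sysReserve(hint, size, &reserved)        // reserve address space
//	sysMap(p, size, reserved, &memstats.heap_sys) // commit it for use
//	sysUnused(p, size)                            // hint: contents not needed
//	sysUsed(p, size)                              // contents needed again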

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 2 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve somewhat more than 512 GB (the bitmap ends
		// up being 16 GB and the spans array 512 MB), but it hardly
		// matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 2)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
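		// For example, with the 512 GB arena on linux/amd64 this works
		// out to bitmapSize = 512 GB / 32 = 16 GB and spansSize =
		// 512 GB / 8 KB * 8 = 512 MB.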
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = (_MaxArena32 + 1) / (sys.PtrSize * 8 / 2)
			spansSize = (_MaxArena32 + 1) / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than the OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	spansStart := p1
	mheap_.bitmap = p1 + spansSize + bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1 + (spansSize + bitmapSize)
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1 + (spansSize + bitmapSize)
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansStart, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}

// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode; maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1)) // round p up to a _PageSize boundary
				h.mapBits(used)
				h.mapSpans(used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start > _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
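	// The extra _PageSize lets us round the returned pointer up to a
	// _PageSize boundary below even if the OS gives us a less-aligned one.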
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > _MaxArena32 {
		top := ^uintptr(0)
		if top-h.arena_start-1 > _MaxArena32 {
			top = h.arena_start + _MaxArena32 + 1
		}
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1) // round p up to a _PageSize boundary
	if p+n > h.arena_used {
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
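// For example (illustrative): with s.freeindex == 32 and s.allocCache ==
// 0x8 (bit 3 set), Ctz64 returns 3, so the object at index 35 is handed
// out, s.freeindex becomes 36, and the cache shifts right by 4 bits.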
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= (theBit + 1)
			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())
			s.allocCount++
			return v
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span that has an available object and
// returns that object along with a flag indicating that this was a
// heavyweight allocation. If it is a heavyweight allocation, the caller must
// determine whether a new GC cycle needs to be started or, if the GC is
// active, whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[sizeclass]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which means at
			// most 2x worst-case memory wastage (when all but one subobject
			// are unreachable). 8 bytes would result in no wastage at all,
			// but provides fewer opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in that case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
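			// For example (illustrative): two back-to-back 12-byte noscan
			// allocations cannot share one 16-byte block (12+12 > 16), so
			// the second starts a fresh block below, while four 4-byte
			// allocations pack into a single block.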
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySizeClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
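			// For example (illustrative): a 100-byte request maps through
			// size_to_class8 to the 112-byte size class, so size is now 112.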
			span := c.alloc[sizeclass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if noscan {
		heapBitsSetTypeNoScan(uintptr(x))
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
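	// For example (illustrative): a 40960-byte request needs exactly
	// 5 pages, while 33000 bytes needs 33000>>13 = 4 pages plus one
	// more for the 232-byte remainder.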

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npages pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}

// newobject implements the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
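	//
	// For example (illustrative): with the default period of 512 KB, a
	// draw of q = 1<<25 gives qlog = 25 - 26 = -1, so the next sample
	// fires after about 0.693 * 512 KB ≈ 355 KB of allocation.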
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
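//
// A hypothetical call site (illustrative): carve out a 1 KB block with
// the default 8-byte alignment, charged to memstats.other_sys:
//
//	buf := persistentalloc(1024, 0, &memstats.other_sys)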
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}