github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/malloc.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator.
     6  //
     7  // This was originally based on tcmalloc, but has diverged quite a bit.
     8  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9  
    10  // The main allocator works in runs of pages.
    11  // Small allocation sizes (up to and including 32 kB) are
    12  // rounded to one of about 70 size classes, each of which
    13  // has its own free set of objects of exactly that size.
    14  // Any free page of memory can be split into a set of objects
    15  // of one size class, which are then managed using a free bitmap.
    16  //
    17  // The allocator's data structures are:
    18  //
    19  //	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20  //		used to manage storage used by the allocator.
    21  //	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22  //	mspan: a run of in-use pages managed by the mheap.
    23  //	mcentral: collects all spans of a given size class.
    24  //	mcache: a per-P cache of mspans with free space.
    25  //	mstats: allocation statistics.
    26  //
    27  // Allocating a small object proceeds up a hierarchy of caches:
    28  //
    29  //	1. Round the size up to one of the small size classes
    30  //	   and look in the corresponding mspan in this P's mcache.
    31  //	   Scan the mspan's free bitmap to find a free slot.
    32  //	   If there is a free slot, allocate it.
    33  //	   This can all be done without acquiring a lock.
    34  //
    35  //	2. If the mspan has no free slots, obtain a new mspan
    36  //	   from the mcentral's list of mspans of the required size
    37  //	   class that have free space.
    38  //	   Obtaining a whole span amortizes the cost of locking
    39  //	   the mcentral.
    40  //
    41  //	3. If the mcentral's mspan list is empty, obtain a run
    42  //	   of pages from the mheap to use for the mspan.
    43  //
    44  //	4. If the mheap is empty or has no page runs large enough,
    45  //	   allocate a new group of pages (at least 1MB) from the
    46  //	   operating system. Allocating a large run of pages
    47  //	   amortizes the cost of talking to the operating system.
    48  //
    49  // Sweeping an mspan and freeing objects on it proceeds up a similar
    50  // hierarchy:
    51  //
    52  //	1. If the mspan is being swept in response to allocation, it
    53  //	   is returned to the mcache to satisfy the allocation.
    54  //
    55  //	2. Otherwise, if the mspan still has allocated objects in it,
    56  //	   it is placed on the mcentral free list for the mspan's size
    57  //	   class.
    58  //
    59  //	3. Otherwise, if all objects in the mspan are free, the mspan's
    60  //	   pages are returned to the mheap and the mspan is now dead.
    61  //
    62  // Allocating and freeing a large object uses the mheap
    63  // directly, bypassing the mcache and mcentral.
    64  //
    65  // If mspan.needzero is false, then free object slots in the mspan are
    66  // already zeroed. Otherwise if needzero is true, objects are zeroed as
    67  // they are allocated. There are various benefits to delaying zeroing
    68  // this way:
    69  //
    70  //	1. Stack frame allocation can avoid zeroing altogether.
    71  //
    72  //	2. It exhibits better temporal locality, since the program is
    73  //	   probably about to write to the memory.
    74  //
    75  //	3. We don't zero pages that never get reused.
    76  
    77  // Virtual memory layout
    78  //
    79  // The heap consists of a set of arenas, which are 64MB on 64-bit and
    80  // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    81  // aligned to the arena size.
    82  //
    83  // Each arena has an associated heapArena object that stores the
    84  // metadata for that arena: the heap bitmap for all words in the arena
    85  // and the span map for all pages in the arena. heapArena objects are
    86  // themselves allocated off-heap.
    87  //
    88  // Since arenas are aligned, the address space can be viewed as a
    89  // series of arena frames. The arena map (mheap_.arenas) maps from
    90  // arena frame number to *heapArena, or nil for parts of the address
    91  // space not backed by the Go heap. The arena map is structured as a
    92  // two-level array consisting of a "L1" arena map and many "L2" arena
    93  // maps; however, since arenas are large, on many architectures, the
    94  // arena map consists of a single, large L2 map.
    95  //
    96  // The arena map covers the entire possible address space, allowing
    97  // the Go heap to use any part of the address space. The allocator
    98  // attempts to keep arenas contiguous so that large spans (and hence
    99  // large objects) can cross arenas.
   100  
   101  package runtime
   102  
   103  import (
   104  	"internal/goarch"
   105  	"internal/goos"
   106  	"runtime/internal/atomic"
   107  	"runtime/internal/math"
   108  	"runtime/internal/sys"
   109  	"unsafe"
   110  )
   111  
   112  const (
   113  	maxTinySize   = _TinySize
   114  	tinySizeClass = _TinySizeClass
   115  	maxSmallSize  = _MaxSmallSize
   116  
   117  	pageShift = _PageShift
   118  	pageSize  = _PageSize
   119  
   120  	concurrentSweep = _ConcurrentSweep
   121  
   122  	_PageSize = 1 << _PageShift
   123  	_PageMask = _PageSize - 1
   124  
   125  	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
   126  	_64bit = 1 << (^uintptr(0) >> 63) / 2
   127  
   128  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   129  	_TinySize      = 16
   130  	_TinySizeClass = int8(2)
   131  
   132  	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   133  
   134  	// Per-P, per order stack segment cache size.
   135  	_StackCacheSize = 32 * 1024
   136  
   137  	// Number of orders that get caching. Order 0 is FixedStack
   138  	// and each successive order is twice as large.
   139  	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   140  	// will be allocated directly.
   141  	// Since FixedStack is different on different systems, we
   142  	// must vary NumStackOrders to keep the same maximum cached size.
   143  	//   OS               | FixedStack | NumStackOrders
   144  	//   -----------------+------------+---------------
   145  	//   linux/darwin/bsd | 2KB        | 4
   146  	//   windows/32       | 4KB        | 3
   147  	//   windows/64       | 8KB        | 2
   148  	//   plan9            | 4KB        | 3
   149  	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
   150  
   151  	// heapAddrBits is the number of bits in a heap address. On
   152  	// amd64, addresses are sign-extended beyond heapAddrBits. On
   153  	// other arches, they are zero-extended.
   154  	//
   155  	// On most 64-bit platforms, we limit this to 48 bits based on a
   156  	// combination of hardware and OS limitations.
   157  	//
   158  	// amd64 hardware limits addresses to 48 bits, sign-extended
   159  	// to 64 bits. Addresses where the top 16 bits are not either
   160  	// all 0 or all 1 are "non-canonical" and invalid. Because of
   161  	// these "negative" addresses, we offset addresses by 1<<47
   162  	// (arenaBaseOffset) on amd64 before computing indexes into
   163  	// the heap arenas index. In 2017, amd64 hardware added
   164  	// support for 57 bit addresses; however, currently only Linux
   165  	// supports this extension and the kernel will never choose an
   166  	// address above 1<<47 unless mmap is called with a hint
   167  	// address above 1<<47 (which we never do).
   168  	//
   169  	// arm64 hardware (as of ARMv8) limits user addresses to 48
   170  	// bits, in the range [0, 1<<48).
   171  	//
   172  	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   173  	// in hardware. On Linux, Go leans on stricter OS limits. Based
   174  	// on Linux's processor.h, the user address space is limited as
   175  	// follows on 64-bit architectures:
   176  	//
   177  	// Architecture  Name              Maximum Value (exclusive)
   178  	// ---------------------------------------------------------------------
   179  	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   180  	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   181  	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   182  	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   183  	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   184  	//
   185  	// These limits may increase over time, but are currently at
   186  	// most 48 bits except on s390x. On all architectures, Linux
   187  	// starts placing mmap'd regions at addresses that are
   188  	// significantly below 48 bits, so even if it's possible to
   189  	// exceed Go's 48 bit limit, it's extremely unlikely in
   190  	// practice.
   191  	//
   192  	// On 32-bit platforms, we accept the full 32-bit address
   193  	// space because doing so is cheap.
   194  	// mips32 only has access to the low 2GB of virtual memory, so
   195  	// we further limit it to 31 bits.
   196  	//
   197  	// On ios/arm64, although 64-bit pointers are presumably
   198  	// available, pointers are truncated to 33 bits in iOS <14.
   199  	// Furthermore, only the top 4 GiB of the address space are
   200  	// actually available to the application. In iOS >=14, more
   201  	// of the address space is available, and the OS can now
   202  	// provide addresses outside of those 33 bits. Pick 40 bits
   203  	// as a reasonable balance between address space usage by the
   204  	// page allocator, and flexibility for what mmap'd regions
   205  	// we'll accept for the heap. We can't just move to the full
   206  	// 48 bits because this uses too much address space for older
   207  	// iOS versions.
   208  	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
   209  	// to a 48-bit address space like every other arm64 platform.
   210  	//
   211  	// WebAssembly currently has a limit of 4GB linear memory.
   212  	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
   213  
   214  	// maxAlloc is the maximum size of an allocation. On 64-bit,
   215  	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   216  	// 32-bit, however, this is one less than 1<<32 because the
   217  	// number of bytes in the address space doesn't actually fit
   218  	// in a uintptr.
   219  	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   220  
   221  	// The number of bits in a heap address, the size of heap
   222  	// arenas, and the L1 and L2 arena map sizes are related by
   223  	//
   224  	//   (1 << addr bits) = arena size * L1 entries * L2 entries
   225  	//
   226  	// Currently, we balance these as follows:
   227  	//
   228  	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   229  	// --------------  ---------  ----------  ----------  -----------
   230  	//       */64-bit         48        64MB           1    4M (32MB)
   231  	// windows/64-bit         48         4MB          64    1M  (8MB)
   232  	//      ios/arm64         33         4MB           1  2048  (8KB)
   233  	//       */32-bit         32         4MB           1  1024  (4KB)
   234  	//     */mips(le)         31         4MB           1   512  (2KB)
   235  
   236  	// heapArenaBytes is the size of a heap arena. The heap
   237  	// consists of mappings of size heapArenaBytes, aligned to
   238  	// heapArenaBytes. The initial heap mapping is one arena.
   239  	//
   240  	// This is currently 64MB on 64-bit non-Windows and 4MB on
   241  	// 32-bit and on Windows. We use smaller arenas on Windows
   242  	// because all committed memory is charged to the process,
   243  	// even if it's not touched. Hence, for processes with small
   244  	// heaps, the mapped arena space needs to be commensurate.
   245  	// This is particularly important with the race detector,
   246  	// since it significantly amplifies the cost of committed
   247  	// memory.
   248  	heapArenaBytes = 1 << logHeapArenaBytes
   249  
   250  	heapArenaWords = heapArenaBytes / goarch.PtrSize
   251  
   252  	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   253  	// prefer using heapArenaBytes where possible (we need the
   254  	// constant to compute some other constants).
   255  	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
   256  
   257  	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
   258  	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
   259  
   260  	pagesPerArena = heapArenaBytes / pageSize
   261  
   262  	// arenaL1Bits is the number of bits of the arena number
   263  	// covered by the first level arena map.
   264  	//
   265  	// This number should be small, since the first level arena
   266  	// map requires PtrSize*(1<<arenaL1Bits) of space in the
   267  	// binary's BSS. It can be zero, in which case the first level
   268  	// index is effectively unused. There is a performance benefit
   269  	// to this, since the generated code can be more efficient,
   270  	// but comes at the cost of having a large L2 mapping.
   271  	//
   272  	// We use the L1 map on 64-bit Windows because the arena size
   273  	// is small, but the address space is still 48 bits, and
   274  	// there's a high cost to having a large L2.
   275  	arenaL1Bits = 6 * (_64bit * goos.IsWindows)
   276  
   277  	// arenaL2Bits is the number of bits of the arena number
   278  	// covered by the second level arena index.
   279  	//
   280  	// The size of each arena map allocation is proportional to
   281  	// 1<<arenaL2Bits, so it's important that this not be too
   282  	// large. 48 bits leads to 32MB arena index allocations, which
   283  	// is about the practical threshold.
   284  	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   285  
   286  	// arenaL1Shift is the number of bits to shift an arena frame
   287  	// number by to compute an index into the first level arena map.
   288  	arenaL1Shift = arenaL2Bits
   289  
   290  	// arenaBits is the total bits in a combined arena map index.
   291  	// This is split between the index into the L1 arena map and
   292  	// the L2 arena map.
   293  	arenaBits = arenaL1Bits + arenaL2Bits
   294  
   295  	// arenaBaseOffset is the pointer value that corresponds to
   296  	// index 0 in the heap arena map.
   297  	//
   298  	// On amd64, the address space is 48 bits, sign extended to 64
   299  	// bits. This offset lets us handle "negative" addresses (or
   300  	// high addresses if viewed as unsigned).
   301  	//
   302  	// On aix/ppc64, this offset allows to keep the heapAddrBits to
   303  	// 48. Otherwise, it would be 60 in order to handle mmap addresses
   304  	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
   305  	// case, the memory reserved in (s *pageAlloc).init for chunks
   306  	// is causing important slowdowns.
   307  	//
   308  	// On other platforms, the user address space is contiguous
   309  	// and starts at 0, so no offset is necessary.
   310  	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
   311  	// A typed version of this constant that will make it into DWARF (for viewcore).
   312  	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
   313  
   314  	// Max number of threads to run garbage collection.
   315  	// 2, 3, and 4 are all plausible maximums depending
   316  	// on the hardware details of the machine. The garbage
   317  	// collector scales well to 32 cpus.
   318  	_MaxGcproc = 32
   319  
   320  	// minLegalPointer is the smallest possible legal pointer.
   321  	// This is the smallest possible architectural page size,
   322  	// since we assume that the first page is never mapped.
   323  	//
   324  	// This should agree with minZeroPage in the compiler.
   325  	minLegalPointer uintptr = 4096
   326  
   327  	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
   328  	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
   329  	// mappings, are allowed to be backed by huge pages. If the heap goal ever
   330  	// exceeds this threshold, then huge pages are enabled.
   331  	//
   332  	// These numbers are chosen with the assumption that huge pages are on the
   333  	// order of a few MiB in size.
   334  	//
   335  	// The kind of metadata this applies to has a very low overhead when compared
   336  	// to address space used, but their constant overheads for small heaps would
   337  	// be very high if they were to be backed by huge pages (e.g. a few MiB makes
   338  	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
   339  	// heap). The benefit of huge pages is also not worth it for small heaps,
   340  	// because only a very, very small part of the metadata is used for small heaps.
   341  	//
   342  	// N.B. If the heap goal exceeds the threshold then shrinks to a very small size
   343  	// again, then huge pages will still be enabled for this mapping. The reason is that
   344  	// there's no point unless we're also returning the physical memory for these
   345  	// metadata mappings back to the OS. That would be quite complex to do in general
   346  	// as the heap is likely fragmented after a reduction in heap size.
   347  	minHeapForMetadataHugePages = 1 << 30
   348  )
   349  
   350  // physPageSize is the size in bytes of the OS's physical pages.
   351  // Mapping and unmapping operations must be done at multiples of
   352  // physPageSize.
   353  //
   354  // This must be set by the OS init code (typically in osinit) before
   355  // mallocinit.
   356  var physPageSize uintptr
   357  
   358  // physHugePageSize is the size in bytes of the OS's default physical huge
   359  // page size whose allocation is opaque to the application. It is assumed
   360  // and verified to be a power of two.
   361  //
   362  // If set, this must be set by the OS init code (typically in osinit) before
   363  // mallocinit. However, setting it at all is optional, and leaving the default
   364  // value is always safe (though potentially less efficient).
   365  //
   366  // Since physHugePageSize is always assumed to be a power of two,
   367  // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   368  // The purpose of physHugePageShift is to avoid doing divisions in
   369  // performance critical functions.
   370  var (
   371  	physHugePageSize  uintptr
   372  	physHugePageShift uint
   373  )
   374  
   375  func mallocinit() {
   376  	if class_to_size[_TinySizeClass] != _TinySize {
   377  		throw("bad TinySizeClass")
   378  	}
   379  
   380  	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
   381  		// heapBits expects modular arithmetic on bitmap
   382  		// addresses to work.
   383  		throw("heapArenaBitmapWords not a power of 2")
   384  	}
   385  
   386  	// Check physPageSize.
   387  	if physPageSize == 0 {
   388  		// The OS init code failed to fetch the physical page size.
   389  		throw("failed to get system page size")
   390  	}
   391  	if physPageSize > maxPhysPageSize {
   392  		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
   393  		throw("bad system page size")
   394  	}
   395  	if physPageSize < minPhysPageSize {
   396  		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   397  		throw("bad system page size")
   398  	}
   399  	if physPageSize&(physPageSize-1) != 0 {
   400  		print("system page size (", physPageSize, ") must be a power of 2\n")
   401  		throw("bad system page size")
   402  	}
   403  	if physHugePageSize&(physHugePageSize-1) != 0 {
   404  		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   405  		throw("bad system huge page size")
   406  	}
   407  	if physHugePageSize > maxPhysHugePageSize {
   408  		// physHugePageSize is greater than the maximum supported huge page size.
   409  		// Don't throw here, like in the other cases, since a system configured
   410  		// in this way isn't wrong, we just don't have the code to support them.
   411  		// Instead, silently set the huge page size to zero.
   412  		physHugePageSize = 0
   413  	}
   414  	if physHugePageSize != 0 {
   415  		// Since physHugePageSize is a power of 2, it suffices to increase
   416  		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   417  		for 1<<physHugePageShift != physHugePageSize {
   418  			physHugePageShift++
   419  		}
   420  	}
   421  	if pagesPerArena%pagesPerSpanRoot != 0 {
   422  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
   423  		throw("bad pagesPerSpanRoot")
   424  	}
   425  	if pagesPerArena%pagesPerReclaimerChunk != 0 {
   426  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
   427  		throw("bad pagesPerReclaimerChunk")
   428  	}
   429  
   430  	if minTagBits > taggedPointerBits {
   431  		throw("taggedPointerbits too small")
   432  	}
   433  
   434  	// Initialize the heap.
   435  	mheap_.init()
   436  	mcache0 = allocmcache()
   437  	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
   438  	lockInit(&profInsertLock, lockRankProfInsert)
   439  	lockInit(&profBlockLock, lockRankProfBlock)
   440  	lockInit(&profMemActiveLock, lockRankProfMemActive)
   441  	for i := range profMemFutureLock {
   442  		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
   443  	}
   444  	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
   445  
   446  	// Create initial arena growth hints.
   447  	if goarch.PtrSize == 8 {
   448  		// On a 64-bit machine, we pick the following hints
   449  		// because:
   450  		//
   451  		// 1. Starting from the middle of the address space
   452  		// makes it easier to grow out a contiguous range
   453  		// without running in to some other mapping.
   454  		//
   455  		// 2. This makes Go heap addresses more easily
   456  		// recognizable when debugging.
   457  		//
   458  		// 3. Stack scanning in gccgo is still conservative,
   459  		// so it's important that addresses be distinguishable
   460  		// from other data.
   461  		//
   462  		// Starting at 0x00c0 means that the valid memory addresses
   463  		// will begin 0x00c0, 0x00c1, ...
   464  		// In little-endian, that's c0 00, c1 00, ... None of those are valid
   465  		// UTF-8 sequences, and they are otherwise as far away from
   466  		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   467  		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   468  		// on OS X during thread allocations.  0x00c0 causes conflicts with
   469  		// AddressSanitizer which reserves all memory up to 0x0100.
   470  		// These choices reduce the odds of a conservative garbage collector
   471  		// not collecting memory because some non-pointer block of memory
   472  		// had a bit pattern that matched a memory address.
   473  		//
   474  		// However, on arm64, we ignore all this advice above and slam the
   475  		// allocation at 0x40 << 32 because when using 4k pages with 3-level
   476  		// translation buffers, the user address space is limited to 39 bits
   477  		// On ios/arm64, the address space is even smaller.
   478  		//
   479  		// On AIX, mmaps starts at 0x0A00000000000000 for 64-bit.
   480  		// processes.
   481  		//
   482  		// Space mapped for user arenas comes immediately after the range
   483  		// originally reserved for the regular heap when race mode is not
   484  		// enabled because user arena chunks can never be used for regular heap
   485  		// allocations and we want to avoid fragmenting the address space.
   486  		//
   487  		// In race mode we have no choice but to just use the same hints because
   488  		// the race detector requires that the heap be mapped contiguously.
   489  		for i := 0x7f; i >= 0; i-- {
   490  			var p uintptr
   491  			switch {
   492  			case raceenabled:
   493  				// The TSAN runtime requires the heap
   494  				// to be in the range [0x00c000000000,
   495  				// 0x00e000000000).
   496  				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   497  				if p >= uintptrMask&0x00e000000000 {
   498  					continue
   499  				}
   500  			case GOARCH == "arm64" && GOOS == "ios":
   501  				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   502  			case GOARCH == "arm64":
   503  				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   504  			case GOOS == "aix":
   505  				if i == 0 {
   506  					// We don't use addresses directly after 0x0A00000000000000
   507  					// to avoid collisions with others mmaps done by non-go programs.
   508  					continue
   509  				}
   510  				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   511  			default:
   512  				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   513  			}
   514  			// Switch to generating hints for user arenas if we've gone
   515  			// through about half the hints. In race mode, take only about
   516  			// a quarter; we don't have very much space to work with.
   517  			hintList := &mheap_.arenaHints
   518  			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
   519  				hintList = &mheap_.userArena.arenaHints
   520  			}
   521  			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   522  			hint.addr = p
   523  			hint.next, *hintList = *hintList, hint
   524  		}
   525  	} else {
   526  		// On a 32-bit machine, we're much more concerned
   527  		// about keeping the usable heap contiguous.
   528  		// Hence:
   529  		//
   530  		// 1. We reserve space for all heapArenas up front so
   531  		// they don't get interleaved with the heap. They're
   532  		// ~258MB, so this isn't too bad. (We could reserve a
   533  		// smaller amount of space up front if this is a
   534  		// problem.)
   535  		//
   536  		// 2. We hint the heap to start right above the end of
   537  		// the binary so we have the best chance of keeping it
   538  		// contiguous.
   539  		//
   540  		// 3. We try to stake out a reasonably large initial
   541  		// heap reservation.
   542  
   543  		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   544  		meta := uintptr(sysReserve(nil, arenaMetaSize))
   545  		if meta != 0 {
   546  			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
   547  		}
   548  
   549  		// We want to start the arena low, but if we're linked
   550  		// against C code, it's possible global constructors
   551  		// have called malloc and adjusted the process' brk.
   552  		// Query the brk so we can avoid trying to map the
   553  		// region over it (which will cause the kernel to put
   554  		// the region somewhere else, likely at a high
   555  		// address).
   556  		procBrk := sbrk0()
   557  
   558  		// If we ask for the end of the data segment but the
   559  		// operating system requires a little more space
   560  		// before we can start allocating, it will give out a
   561  		// slightly higher pointer. Except QEMU, which is
   562  		// buggy, as usual: it won't adjust the pointer
   563  		// upward. So adjust it upward a little bit ourselves:
   564  		// 1/4 MB to get away from the running binary image.
   565  		p := firstmoduledata.end
   566  		if p < procBrk {
   567  			p = procBrk
   568  		}
   569  		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   570  			p = mheap_.heapArenaAlloc.end
   571  		}
   572  		p = alignUp(p+(256<<10), heapArenaBytes)
   573  		// Because we're worried about fragmentation on
   574  		// 32-bit, we try to make a large initial reservation.
   575  		arenaSizes := []uintptr{
   576  			512 << 20,
   577  			256 << 20,
   578  			128 << 20,
   579  		}
   580  		for _, arenaSize := range arenaSizes {
   581  			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
   582  			if a != nil {
   583  				mheap_.arena.init(uintptr(a), size, false)
   584  				p = mheap_.arena.end // For hint below
   585  				break
   586  			}
   587  		}
   588  		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   589  		hint.addr = p
   590  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   591  
   592  		// Place the hint for user arenas just after the large reservation.
   593  		//
   594  		// While this potentially competes with the hint above, in practice we probably
   595  		// aren't going to be getting this far anyway on 32-bit platforms.
   596  		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   597  		userArenaHint.addr = p
   598  		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
   599  	}
   600  	// Initialize the memory limit here because the allocator is going to look at it
   601  	// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
   602  	gcController.memoryLimit.Store(maxInt64)
   603  }
   604  
   605  // sysAlloc allocates heap arena space for at least n bytes. The
   606  // returned pointer is always heapArenaBytes-aligned and backed by
   607  // h.arenas metadata. The returned size is always a multiple of
   608  // heapArenaBytes. sysAlloc returns nil on failure.
   609  // There is no corresponding free function.
   610  //
   611  // hintList is a list of hint addresses for where to allocate new
   612  // heap arenas. It must be non-nil.
   613  //
   614  // register indicates whether the heap arena should be registered
   615  // in allArenas.
   616  //
   617  // sysAlloc returns a memory region in the Reserved state. This region must
   618  // be transitioned to Prepared and then Ready before use.
   619  //
   620  // h must be locked.
   621  func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) {
   622  	assertLockHeld(&h.lock)
   623  
   624  	n = alignUp(n, heapArenaBytes)
   625  
   626  	if hintList == &h.arenaHints {
   627  		// First, try the arena pre-reservation.
   628  		// Newly-used mappings are considered released.
   629  		//
   630  		// Only do this if we're using the regular heap arena hints.
   631  		// This behavior is only for the heap.
   632  		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
   633  		if v != nil {
   634  			size = n
   635  			goto mapped
   636  		}
   637  	}
   638  
   639  	// Try to grow the heap at a hint address.
   640  	for *hintList != nil {
   641  		hint := *hintList
   642  		p := hint.addr
   643  		if hint.down {
   644  			p -= n
   645  		}
   646  		if p+n < p {
   647  			// We can't use this, so don't ask.
   648  			v = nil
   649  		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   650  			// Outside addressable heap. Can't use.
   651  			v = nil
   652  		} else {
   653  			v = sysReserve(unsafe.Pointer(p), n)
   654  		}
   655  		if p == uintptr(v) {
   656  			// Success. Update the hint.
   657  			if !hint.down {
   658  				p += n
   659  			}
   660  			hint.addr = p
   661  			size = n
   662  			break
   663  		}
   664  		// Failed. Discard this hint and try the next.
   665  		//
   666  		// TODO: This would be cleaner if sysReserve could be
   667  		// told to only return the requested address. In
   668  		// particular, this is already how Windows behaves, so
   669  		// it would simplify things there.
   670  		if v != nil {
   671  			sysFreeOS(v, n)
   672  		}
   673  		*hintList = hint.next
   674  		h.arenaHintAlloc.free(unsafe.Pointer(hint))
   675  	}
   676  
   677  	if size == 0 {
   678  		if raceenabled {
   679  			// The race detector assumes the heap lives in
   680  			// [0x00c000000000, 0x00e000000000), but we
   681  			// just ran out of hints in this region. Give
   682  			// a nice failure.
   683  			throw("too many address space collisions for -race mode")
   684  		}
   685  
   686  		// All of the hints failed, so we'll take any
   687  		// (sufficiently aligned) address the kernel will give
   688  		// us.
   689  		v, size = sysReserveAligned(nil, n, heapArenaBytes)
   690  		if v == nil {
   691  			return nil, 0
   692  		}
   693  
   694  		// Create new hints for extending this region.
   695  		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   696  		hint.addr, hint.down = uintptr(v), true
   697  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   698  		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   699  		hint.addr = uintptr(v) + size
   700  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   701  	}
   702  
   703  	// Check for bad pointers or pointers we can't use.
   704  	{
   705  		var bad string
   706  		p := uintptr(v)
   707  		if p+size < p {
   708  			bad = "region exceeds uintptr range"
   709  		} else if arenaIndex(p) >= 1<<arenaBits {
   710  			bad = "base outside usable address space"
   711  		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   712  			bad = "end outside usable address space"
   713  		}
   714  		if bad != "" {
   715  			// This should be impossible on most architectures,
   716  			// but it would be really confusing to debug.
   717  			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   718  			throw("memory reservation exceeds address space limit")
   719  		}
   720  	}
   721  
   722  	if uintptr(v)&(heapArenaBytes-1) != 0 {
   723  		throw("misrounded allocation in sysAlloc")
   724  	}
   725  
   726  mapped:
   727  	// Create arena metadata.
   728  	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   729  		l2 := h.arenas[ri.l1()]
   730  		if l2 == nil {
   731  			// Allocate an L2 arena map.
   732  			//
   733  			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
   734  			// statistic we can comfortably account for this space in. With this structure,
   735  			// we rely on demand paging to avoid large overheads, but tracking which memory
   736  			// is paged in is too expensive. Trying to account for the whole region means
   737  			// that it will appear like an enormous memory overhead in statistics, even though
   738  			// it is not.
   739  			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
   740  			if l2 == nil {
   741  				throw("out of memory allocating heap arena map")
   742  			}
   743  			if h.arenasHugePages {
   744  				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   745  			} else {
   746  				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   747  			}
   748  			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   749  		}
   750  
   751  		if l2[ri.l2()] != nil {
   752  			throw("arena already initialized")
   753  		}
   754  		var r *heapArena
   755  		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   756  		if r == nil {
   757  			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   758  			if r == nil {
   759  				throw("out of memory allocating heap arena metadata")
   760  			}
   761  		}
   762  
   763  		// Register the arena in allArenas if requested.
   764  		if register {
   765  			if len(h.allArenas) == cap(h.allArenas) {
   766  				size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
   767  				if size == 0 {
   768  					size = physPageSize
   769  				}
   770  				newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
   771  				if newArray == nil {
   772  					throw("out of memory allocating allArenas")
   773  				}
   774  				oldSlice := h.allArenas
   775  				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
   776  				copy(h.allArenas, oldSlice)
   777  				// Do not free the old backing array because
   778  				// there may be concurrent readers. Since we
   779  				// double the array each time, this can lead
   780  				// to at most 2x waste.
   781  			}
   782  			h.allArenas = h.allArenas[:len(h.allArenas)+1]
   783  			h.allArenas[len(h.allArenas)-1] = ri
   784  		}
   785  
   786  		// Store atomically just in case an object from the
   787  		// new heap arena becomes visible before the heap lock
   788  		// is released (which shouldn't happen, but there's
   789  		// little downside to this).
   790  		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   791  	}
   792  
   793  	// Tell the race detector about the new heap memory.
   794  	if raceenabled {
   795  		racemapshadow(v, size)
   796  	}
   797  
   798  	return
   799  }
   800  
   801  // sysReserveAligned is like sysReserve, but the returned pointer is
   802  // aligned to align bytes. It may reserve either n or n+align bytes,
   803  // so it returns the size that was reserved.
   804  func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
   805  	// Since the alignment is rather large in uses of this
   806  	// function, we're not likely to get it by chance, so we ask
   807  	// for a larger region and remove the parts we don't need.
   808  	retries := 0
   809  retry:
   810  	p := uintptr(sysReserve(v, size+align))
   811  	switch {
   812  	case p == 0:
   813  		return nil, 0
   814  	case p&(align-1) == 0:
   815  		return unsafe.Pointer(p), size + align
   816  	case GOOS == "windows":
   817  		// On Windows we can't release pieces of a
   818  		// reservation, so we release the whole thing and
   819  		// re-reserve the aligned sub-region. This may race,
   820  		// so we may have to try again.
   821  		sysFreeOS(unsafe.Pointer(p), size+align)
   822  		p = alignUp(p, align)
   823  		p2 := sysReserve(unsafe.Pointer(p), size)
   824  		if p != uintptr(p2) {
   825  			// Must have raced. Try again.
   826  			sysFreeOS(p2, size)
   827  			if retries++; retries == 100 {
   828  				throw("failed to allocate aligned heap memory; too many retries")
   829  			}
   830  			goto retry
   831  		}
   832  		// Success.
   833  		return p2, size
   834  	default:
   835  		// Trim off the unaligned parts.
   836  		pAligned := alignUp(p, align)
   837  		sysFreeOS(unsafe.Pointer(p), pAligned-p)
   838  		end := pAligned + size
   839  		endLen := (p + size + align) - end
   840  		if endLen > 0 {
   841  			sysFreeOS(unsafe.Pointer(end), endLen)
   842  		}
   843  		return unsafe.Pointer(pAligned), size
   844  	}
   845  }
   846  
   847  // enableMetadataHugePages enables huge pages for various sources of heap metadata.
   848  //
   849  // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
   850  // time, but may take time proportional to the size of the mapped heap beyond that.
   851  //
   852  // This function is idempotent.
   853  //
   854  // The heap lock must not be held over this operation, since it will briefly acquire
   855  // the heap lock.
   856  func (h *mheap) enableMetadataHugePages() {
   857  	// Enable huge pages for page structure.
   858  	h.pages.enableChunkHugePages()
   859  
   860  	// Grab the lock and set arenasHugePages if it's not.
   861  	//
   862  	// Once arenasHugePages is set, all new L2 entries will be eligible for
   863  	// huge pages. We'll set all the old entries after we release the lock.
   864  	lock(&h.lock)
   865  	if h.arenasHugePages {
   866  		unlock(&h.lock)
   867  		return
   868  	}
   869  	h.arenasHugePages = true
   870  	unlock(&h.lock)
   871  
   872  	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
   873  	// just iterate over the whole thing.
   874  	for i := range h.arenas {
   875  		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
   876  		if l2 == nil {
   877  			continue
   878  		}
   879  		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   880  	}
   881  }
   882  
   883  // base address for all 0-byte allocations
   884  var zerobase uintptr
   885  
   886  // nextFreeFast returns the next free object if one is quickly available.
   887  // Otherwise it returns 0.
   888  func nextFreeFast(s *mspan) gclinkptr {
   889  	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
   890  	if theBit < 64 {
   891  		result := s.freeindex + uintptr(theBit)
   892  		if result < s.nelems {
   893  			freeidx := result + 1
   894  			if freeidx%64 == 0 && freeidx != s.nelems {
   895  				return 0
   896  			}
   897  			s.allocCache >>= uint(theBit + 1)
   898  			s.freeindex = freeidx
   899  			s.allocCount++
   900  			return gclinkptr(result*s.elemsize + s.base())
   901  		}
   902  	}
   903  	return 0
   904  }
   905  
   906  // nextFree returns the next free object from the cached span if one is available.
   907  // Otherwise it refills the cache with a span with an available object and
   908  // returns that object along with a flag indicating that this was a heavy
   909  // weight allocation. If it is a heavy weight allocation the caller must
   910  // determine whether a new GC cycle needs to be started or if the GC is active
   911  // whether this goroutine needs to assist the GC.
   912  //
   913  // Must run in a non-preemptible context since otherwise the owner of
   914  // c could change.
   915  func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
   916  	s = c.alloc[spc]
   917  	shouldhelpgc = false
   918  	freeIndex := s.nextFreeIndex()
   919  	if freeIndex == s.nelems {
   920  		// The span is full.
   921  		if uintptr(s.allocCount) != s.nelems {
   922  			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   923  			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   924  		}
   925  		c.refill(spc)
   926  		shouldhelpgc = true
   927  		s = c.alloc[spc]
   928  
   929  		freeIndex = s.nextFreeIndex()
   930  	}
   931  
   932  	if freeIndex >= s.nelems {
   933  		throw("freeIndex is not valid")
   934  	}
   935  
   936  	v = gclinkptr(freeIndex*s.elemsize + s.base())
   937  	s.allocCount++
   938  	if uintptr(s.allocCount) > s.nelems {
   939  		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   940  		throw("s.allocCount > s.nelems")
   941  	}
   942  	return
   943  }
   944  
   945  // Allocate an object of size bytes.
   946  // Small objects are allocated from the per-P cache's free lists.
   947  // Large objects (> 32 kB) are allocated straight from the heap.
   948  func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
   949  	if gcphase == _GCmarktermination {
   950  		throw("mallocgc called with gcphase == _GCmarktermination")
   951  	}
   952  
   953  	if size == 0 {
   954  		return unsafe.Pointer(&zerobase)
   955  	}
   956  
   957  	// It's possible for any malloc to trigger sweeping, which may in
   958  	// turn queue finalizers. Record this dynamic lock edge.
   959  	lockRankMayQueueFinalizer()
   960  
   961  	userSize := size
   962  	if asanenabled {
   963  		// Refer to ASAN runtime library, the malloc() function allocates extra memory,
   964  		// the redzone, around the user requested memory region. And the redzones are marked
   965  		// as unaddressable. We perform the same operations in Go to detect the overflows or
   966  		// underflows.
   967  		size += computeRZlog(size)
   968  	}
   969  
   970  	if debug.malloc {
   971  		if debug.sbrk != 0 {
   972  			align := uintptr(16)
   973  			if typ != nil {
   974  				// TODO(austin): This should be just
   975  				//   align = uintptr(typ.align)
   976  				// but that's only 4 on 32-bit platforms,
   977  				// even if there's a uint64 field in typ (see #599).
   978  				// This causes 64-bit atomic accesses to panic.
   979  				// Hence, we use stricter alignment that matches
   980  				// the normal allocator better.
   981  				if size&7 == 0 {
   982  					align = 8
   983  				} else if size&3 == 0 {
   984  					align = 4
   985  				} else if size&1 == 0 {
   986  					align = 2
   987  				} else {
   988  					align = 1
   989  				}
   990  			}
   991  			return persistentalloc(size, align, &memstats.other_sys)
   992  		}
   993  
   994  		if inittrace.active && inittrace.id == getg().goid {
   995  			// Init functions are executed sequentially in a single goroutine.
   996  			inittrace.allocs += 1
   997  		}
   998  	}
   999  
  1000  	// assistG is the G to charge for this allocation, or nil if
  1001  	// GC is not currently active.
  1002  	assistG := deductAssistCredit(size)
  1003  
  1004  	// Set mp.mallocing to keep from being preempted by GC.
  1005  	mp := acquirem()
  1006  	if mp.mallocing != 0 {
  1007  		throw("malloc deadlock")
  1008  	}
  1009  	if mp.gsignal == getg() {
  1010  		throw("malloc during signal")
  1011  	}
  1012  	mp.mallocing = 1
  1013  
  1014  	shouldhelpgc := false
  1015  	dataSize := userSize
  1016  	c := getMCache(mp)
  1017  	if c == nil {
  1018  		throw("mallocgc called without a P or outside bootstrapping")
  1019  	}
  1020  	var span *mspan
  1021  	var x unsafe.Pointer
  1022  	noscan := typ == nil || typ.PtrBytes == 0
  1023  	// In some cases block zeroing can profitably (for latency reduction purposes)
  1024  	// be delayed till preemption is possible; delayedZeroing tracks that state.
  1025  	delayedZeroing := false
  1026  	if size <= maxSmallSize {
  1027  		if noscan && size < maxTinySize {
  1028  			// Tiny allocator.
  1029  			//
  1030  			// Tiny allocator combines several tiny allocation requests
  1031  			// into a single memory block. The resulting memory block
  1032  			// is freed when all subobjects are unreachable. The subobjects
  1033  			// must be noscan (don't have pointers), this ensures that
  1034  			// the amount of potentially wasted memory is bounded.
  1035  			//
  1036  			// Size of the memory block used for combining (maxTinySize) is tunable.
  1037  			// Current setting is 16 bytes, which relates to 2x worst case memory
  1038  			// wastage (when all but one subobjects are unreachable).
  1039  			// 8 bytes would result in no wastage at all, but provides less
  1040  			// opportunities for combining.
  1041  			// 32 bytes provides more opportunities for combining,
  1042  			// but can lead to 4x worst case wastage.
  1043  			// The best case winning is 8x regardless of block size.
  1044  			//
  1045  			// Objects obtained from tiny allocator must not be freed explicitly.
  1046  			// So when an object will be freed explicitly, we ensure that
  1047  			// its size >= maxTinySize.
  1048  			//
  1049  			// SetFinalizer has a special case for objects potentially coming
  1050  			// from tiny allocator, it such case it allows to set finalizers
  1051  			// for an inner byte of a memory block.
  1052  			//
  1053  			// The main targets of tiny allocator are small strings and
  1054  			// standalone escaping variables. On a json benchmark
  1055  			// the allocator reduces number of allocations by ~12% and
  1056  			// reduces heap size by ~20%.
  1057  			off := c.tinyoffset
  1058  			// Align tiny pointer for required (conservative) alignment.
  1059  			if size&7 == 0 {
  1060  				off = alignUp(off, 8)
  1061  			} else if goarch.PtrSize == 4 && size == 12 {
  1062  				// Conservatively align 12-byte objects to 8 bytes on 32-bit
  1063  				// systems so that objects whose first field is a 64-bit
  1064  				// value is aligned to 8 bytes and does not cause a fault on
  1065  				// atomic access. See issue 37262.
  1066  				// TODO(mknyszek): Remove this workaround if/when issue 36606
  1067  				// is resolved.
  1068  				off = alignUp(off, 8)
  1069  			} else if size&3 == 0 {
  1070  				off = alignUp(off, 4)
  1071  			} else if size&1 == 0 {
  1072  				off = alignUp(off, 2)
  1073  			}
  1074  			if off+size <= maxTinySize && c.tiny != 0 {
  1075  				// The object fits into existing tiny block.
  1076  				x = unsafe.Pointer(c.tiny + off)
  1077  				c.tinyoffset = off + size
  1078  				c.tinyAllocs++
  1079  				mp.mallocing = 0
  1080  				releasem(mp)
  1081  				return x
  1082  			}
  1083  			// Allocate a new maxTinySize block.
  1084  			span = c.alloc[tinySpanClass]
  1085  			v := nextFreeFast(span)
  1086  			if v == 0 {
  1087  				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
  1088  			}
  1089  			x = unsafe.Pointer(v)
  1090  			(*[2]uint64)(x)[0] = 0
  1091  			(*[2]uint64)(x)[1] = 0
  1092  			// See if we need to replace the existing tiny block with the new one
  1093  			// based on amount of remaining free space.
  1094  			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
  1095  				// Note: disabled when race detector is on, see comment near end of this function.
  1096  				c.tiny = uintptr(x)
  1097  				c.tinyoffset = size
  1098  			}
  1099  			size = maxTinySize
  1100  		} else {
  1101  			var sizeclass uint8
  1102  			if size <= smallSizeMax-8 {
  1103  				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
  1104  			} else {
  1105  				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
  1106  			}
  1107  			size = uintptr(class_to_size[sizeclass])
  1108  			spc := makeSpanClass(sizeclass, noscan)
  1109  			span = c.alloc[spc]
  1110  			v := nextFreeFast(span)
  1111  			if v == 0 {
  1112  				v, span, shouldhelpgc = c.nextFree(spc)
  1113  			}
  1114  			x = unsafe.Pointer(v)
  1115  			if needzero && span.needzero != 0 {
  1116  				memclrNoHeapPointers(x, size)
  1117  			}
  1118  		}
  1119  	} else {
  1120  		shouldhelpgc = true
  1121  		// For large allocations, keep track of zeroed state so that
  1122  		// bulk zeroing can be happen later in a preemptible context.
  1123  		span = c.allocLarge(size, noscan)
  1124  		span.freeindex = 1
  1125  		span.allocCount = 1
  1126  		size = span.elemsize
  1127  		x = unsafe.Pointer(span.base())
  1128  		if needzero && span.needzero != 0 {
  1129  			if noscan {
  1130  				delayedZeroing = true
  1131  			} else {
  1132  				memclrNoHeapPointers(x, size)
  1133  				// We've in theory cleared almost the whole span here,
  1134  				// and could take the extra step of actually clearing
  1135  				// the whole thing. However, don't. Any GC bits for the
  1136  				// uncleared parts will be zero, and it's just going to
  1137  				// be needzero = 1 once freed anyway.
  1138  			}
  1139  		}
  1140  	}
  1141  
  1142  	if !noscan {
  1143  		var scanSize uintptr
  1144  		heapBitsSetType(uintptr(x), size, dataSize, typ)
  1145  		if dataSize > typ.Size_ {
  1146  			// Array allocation. If there are any
  1147  			// pointers, GC has to scan to the last
  1148  			// element.
  1149  			if typ.PtrBytes != 0 {
  1150  				scanSize = dataSize - typ.Size_ + typ.PtrBytes
  1151  			}
  1152  		} else {
  1153  			scanSize = typ.PtrBytes
  1154  		}
  1155  		c.scanAlloc += scanSize
  1156  	}
  1157  
  1158  	// Ensure that the stores above that initialize x to
  1159  	// type-safe memory and set the heap bits occur before
  1160  	// the caller can make x observable to the garbage
  1161  	// collector. Otherwise, on weakly ordered machines,
  1162  	// the garbage collector could follow a pointer to x,
  1163  	// but see uninitialized memory or stale heap bits.
  1164  	publicationBarrier()
  1165  	// As x and the heap bits are initialized, update
  1166  	// freeIndexForScan now so x is seen by the GC
  1167  	// (including convervative scan) as an allocated object.
  1168  	// While this pointer can't escape into user code as a
  1169  	// _live_ pointer until we return, conservative scanning
  1170  	// may find a dead pointer that happens to point into this
  1171  	// object. Delaying this update until now ensures that
  1172  	// conservative scanning considers this pointer dead until
  1173  	// this point.
  1174  	span.freeIndexForScan = span.freeindex
  1175  
  1176  	// Allocate black during GC.
  1177  	// All slots hold nil so no scanning is needed.
  1178  	// This may be racing with GC so do it atomically if there can be
  1179  	// a race marking the bit.
  1180  	if gcphase != _GCoff {
  1181  		gcmarknewobject(span, uintptr(x), size)
  1182  	}
  1183  
  1184  	if raceenabled {
  1185  		racemalloc(x, size)
  1186  	}
  1187  
  1188  	if msanenabled {
  1189  		msanmalloc(x, size)
  1190  	}
  1191  
  1192  	if asanenabled {
  1193  		// We should only read/write the memory with the size asked by the user.
  1194  		// The rest of the allocated memory should be poisoned, so that we can report
  1195  		// errors when accessing poisoned memory.
  1196  		// The allocated memory is larger than required userSize, it will also include
  1197  		// redzone and some other padding bytes.
  1198  		rzBeg := unsafe.Add(x, userSize)
  1199  		asanpoison(rzBeg, size-userSize)
  1200  		asanunpoison(x, userSize)
  1201  	}
  1202  
  1203  	if rate := MemProfileRate; rate > 0 {
  1204  		// Note cache c only valid while m acquired; see #47302
  1205  		if rate != 1 && size < c.nextSample {
  1206  			c.nextSample -= size
  1207  		} else {
  1208  			profilealloc(mp, x, size)
  1209  		}
  1210  	}
  1211  	mp.mallocing = 0
  1212  	releasem(mp)
  1213  
  1214  	// Pointerfree data can be zeroed late in a context where preemption can occur.
  1215  	// x will keep the memory alive.
  1216  	if delayedZeroing {
  1217  		if !noscan {
  1218  			throw("delayed zeroing on data that may contain pointers")
  1219  		}
  1220  		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
  1221  	}
  1222  
  1223  	if debug.malloc {
  1224  		if debug.allocfreetrace != 0 {
  1225  			tracealloc(x, size, typ)
  1226  		}
  1227  
  1228  		if inittrace.active && inittrace.id == getg().goid {
  1229  			// Init functions are executed sequentially in a single goroutine.
  1230  			inittrace.bytes += uint64(size)
  1231  		}
  1232  	}
  1233  
  1234  	if assistG != nil {
  1235  		// Account for internal fragmentation in the assist
  1236  		// debt now that we know it.
  1237  		assistG.gcAssistBytes -= int64(size - dataSize)
  1238  	}
  1239  
  1240  	if shouldhelpgc {
  1241  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1242  			gcStart(t)
  1243  		}
  1244  	}
  1245  
  1246  	if raceenabled && noscan && dataSize < maxTinySize {
  1247  		// Pad tinysize allocations so they are aligned with the end
  1248  		// of the tinyalloc region. This ensures that any arithmetic
  1249  		// that goes off the top end of the object will be detectable
  1250  		// by checkptr (issue 38872).
  1251  		// Note that we disable tinyalloc when raceenabled for this to work.
  1252  		// TODO: This padding is only performed when the race detector
  1253  		// is enabled. It would be nice to enable it if any package
  1254  		// was compiled with checkptr, but there's no easy way to
  1255  		// detect that (especially at compile time).
  1256  		// TODO: enable this padding for all allocations, not just
  1257  		// tinyalloc ones. It's tricky because of pointer maps.
  1258  		// Maybe just all noscan objects?
  1259  		x = add(x, size-dataSize)
  1260  	}
  1261  
  1262  	return x
  1263  }
  1264  
  1265  // deductAssistCredit reduces the current G's assist credit
  1266  // by size bytes, and assists the GC if necessary.
  1267  //
  1268  // Caller must be preemptible.
  1269  //
  1270  // Returns the G for which the assist credit was accounted.
  1271  func deductAssistCredit(size uintptr) *g {
  1272  	var assistG *g
  1273  	if gcBlackenEnabled != 0 {
  1274  		// Charge the current user G for this allocation.
  1275  		assistG = getg()
  1276  		if assistG.m.curg != nil {
  1277  			assistG = assistG.m.curg
  1278  		}
  1279  		// Charge the allocation against the G. We'll account
  1280  		// for internal fragmentation at the end of mallocgc.
  1281  		assistG.gcAssistBytes -= int64(size)
  1282  
  1283  		if assistG.gcAssistBytes < 0 {
  1284  			// This G is in debt. Assist the GC to correct
  1285  			// this before allocating. This must happen
  1286  			// before disabling preemption.
  1287  			gcAssistAlloc(assistG)
  1288  		}
  1289  	}
  1290  	return assistG
  1291  }
  1292  
  1293  // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
  1294  // on chunks of the buffer to be zeroed, with opportunities for preemption
  1295  // along the way.  memclrNoHeapPointers contains no safepoints and also
  1296  // cannot be preemptively scheduled, so this provides a still-efficient
  1297  // block copy that can also be preempted on a reasonable granularity.
  1298  //
  1299  // Use this with care; if the data being cleared is tagged to contain
  1300  // pointers, this allows the GC to run before it is all cleared.
  1301  func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
  1302  	v := uintptr(x)
  1303  	// got this from benchmarking. 128k is too small, 512k is too large.
  1304  	const chunkBytes = 256 * 1024
  1305  	vsize := v + size
  1306  	for voff := v; voff < vsize; voff = voff + chunkBytes {
  1307  		if getg().preempt {
  1308  			// may hold locks, e.g., profiling
  1309  			goschedguarded()
  1310  		}
  1311  		// clear min(avail, lump) bytes
  1312  		n := vsize - voff
  1313  		if n > chunkBytes {
  1314  			n = chunkBytes
  1315  		}
  1316  		memclrNoHeapPointers(unsafe.Pointer(voff), n)
  1317  	}
  1318  }
  1319  
  1320  // implementation of new builtin
  1321  // compiler (both frontend and SSA backend) knows the signature
  1322  // of this function.
  1323  func newobject(typ *_type) unsafe.Pointer {
  1324  	return mallocgc(typ.Size_, typ, true)
  1325  }
  1326  
  1327  //go:linkname reflect_unsafe_New reflect.unsafe_New
  1328  func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1329  	return mallocgc(typ.Size_, typ, true)
  1330  }
  1331  
  1332  //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1333  func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1334  	return mallocgc(typ.Size_, typ, true)
  1335  }
  1336  
  1337  // newarray allocates an array of n elements of type typ.
  1338  func newarray(typ *_type, n int) unsafe.Pointer {
  1339  	if n == 1 {
  1340  		return mallocgc(typ.Size_, typ, true)
  1341  	}
  1342  	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
  1343  	if overflow || mem > maxAlloc || n < 0 {
  1344  		panic(plainError("runtime: allocation size out of range"))
  1345  	}
  1346  	return mallocgc(mem, typ, true)
  1347  }
  1348  
  1349  //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1350  func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1351  	return newarray(typ, n)
  1352  }
  1353  
  1354  func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1355  	c := getMCache(mp)
  1356  	if c == nil {
  1357  		throw("profilealloc called without a P or outside bootstrapping")
  1358  	}
  1359  	c.nextSample = nextSample()
  1360  	mProf_Malloc(x, size)
  1361  }
  1362  
  1363  // nextSample returns the next sampling point for heap profiling. The goal is
  1364  // to sample allocations on average every MemProfileRate bytes, but with a
  1365  // completely random distribution over the allocation timeline; this
  1366  // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
  1367  // processes, the distance between two samples follows the exponential
  1368  // distribution (exp(MemProfileRate)), so the best return value is a random
  1369  // number taken from an exponential distribution whose mean is MemProfileRate.
  1370  func nextSample() uintptr {
  1371  	if MemProfileRate == 1 {
  1372  		// Callers assign our return value to
  1373  		// mcache.next_sample, but next_sample is not used
  1374  		// when the rate is 1. So avoid the math below and
  1375  		// just return something.
  1376  		return 0
  1377  	}
  1378  	if GOOS == "plan9" {
  1379  		// Plan 9 doesn't support floating point in note handler.
  1380  		if gp := getg(); gp == gp.m.gsignal {
  1381  			return nextSampleNoFP()
  1382  		}
  1383  	}
  1384  
  1385  	return uintptr(fastexprand(MemProfileRate))
  1386  }
  1387  
  1388  // fastexprand returns a random number from an exponential distribution with
  1389  // the specified mean.
  1390  func fastexprand(mean int) int32 {
  1391  	// Avoid overflow. Maximum possible step is
  1392  	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
  1393  	switch {
  1394  	case mean > 0x7000000:
  1395  		mean = 0x7000000
  1396  	case mean == 0:
  1397  		return 0
  1398  	}
  1399  
  1400  	// Take a random sample of the exponential distribution exp(-mean*x).
  1401  	// The probability distribution function is mean*exp(-mean*x), so the CDF is
  1402  	// p = 1 - exp(-mean*x), so
  1403  	// q = 1 - p == exp(-mean*x)
  1404  	// log_e(q) = -mean*x
  1405  	// -log_e(q)/mean = x
  1406  	// x = -log_e(q) * mean
  1407  	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
  1408  	const randomBitCount = 26
  1409  	q := fastrandn(1<<randomBitCount) + 1
  1410  	qlog := fastlog2(float64(q)) - randomBitCount
  1411  	if qlog > 0 {
  1412  		qlog = 0
  1413  	}
  1414  	const minusLog2 = -0.6931471805599453 // -ln(2)
  1415  	return int32(qlog*(minusLog2*float64(mean))) + 1
  1416  }
  1417  
  1418  // nextSampleNoFP is similar to nextSample, but uses older,
  1419  // simpler code to avoid floating point.
  1420  func nextSampleNoFP() uintptr {
  1421  	// Set first allocation sample size.
  1422  	rate := MemProfileRate
  1423  	if rate > 0x3fffffff { // make 2*rate not overflow
  1424  		rate = 0x3fffffff
  1425  	}
  1426  	if rate != 0 {
  1427  		return uintptr(fastrandn(uint32(2 * rate)))
  1428  	}
  1429  	return 0
  1430  }
  1431  
  1432  type persistentAlloc struct {
  1433  	base *notInHeap
  1434  	off  uintptr
  1435  }
  1436  
  1437  var globalAlloc struct {
  1438  	mutex
  1439  	persistentAlloc
  1440  }
  1441  
  1442  // persistentChunkSize is the number of bytes we allocate when we grow
  1443  // a persistentAlloc.
  1444  const persistentChunkSize = 256 << 10
  1445  
  1446  // persistentChunks is a list of all the persistent chunks we have
  1447  // allocated. The list is maintained through the first word in the
  1448  // persistent chunk. This is updated atomically.
  1449  var persistentChunks *notInHeap
  1450  
  1451  // Wrapper around sysAlloc that can allocate small chunks.
  1452  // There is no associated free operation.
  1453  // Intended for things like function/type/debug-related persistent data.
  1454  // If align is 0, uses default align (currently 8).
  1455  // The returned memory will be zeroed.
  1456  // sysStat must be non-nil.
  1457  //
  1458  // Consider marking persistentalloc'd types not in heap by embedding
  1459  // runtime/internal/sys.NotInHeap.
  1460  func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1461  	var p *notInHeap
  1462  	systemstack(func() {
  1463  		p = persistentalloc1(size, align, sysStat)
  1464  	})
  1465  	return unsafe.Pointer(p)
  1466  }
  1467  
  1468  // Must run on system stack because stack growth can (re)invoke it.
  1469  // See issue 9174.
  1470  //
  1471  //go:systemstack
  1472  func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
  1473  	const (
  1474  		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
  1475  	)
  1476  
  1477  	if size == 0 {
  1478  		throw("persistentalloc: size == 0")
  1479  	}
  1480  	if align != 0 {
  1481  		if align&(align-1) != 0 {
  1482  			throw("persistentalloc: align is not a power of 2")
  1483  		}
  1484  		if align > _PageSize {
  1485  			throw("persistentalloc: align is too large")
  1486  		}
  1487  	} else {
  1488  		align = 8
  1489  	}
  1490  
  1491  	if size >= maxBlock {
  1492  		return (*notInHeap)(sysAlloc(size, sysStat))
  1493  	}
  1494  
  1495  	mp := acquirem()
  1496  	var persistent *persistentAlloc
  1497  	if mp != nil && mp.p != 0 {
  1498  		persistent = &mp.p.ptr().palloc
  1499  	} else {
  1500  		lock(&globalAlloc.mutex)
  1501  		persistent = &globalAlloc.persistentAlloc
  1502  	}
  1503  	persistent.off = alignUp(persistent.off, align)
  1504  	if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1505  		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
  1506  		if persistent.base == nil {
  1507  			if persistent == &globalAlloc.persistentAlloc {
  1508  				unlock(&globalAlloc.mutex)
  1509  			}
  1510  			throw("runtime: cannot allocate memory")
  1511  		}
  1512  
  1513  		// Add the new chunk to the persistentChunks list.
  1514  		for {
  1515  			chunks := uintptr(unsafe.Pointer(persistentChunks))
  1516  			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1517  			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1518  				break
  1519  			}
  1520  		}
  1521  		persistent.off = alignUp(goarch.PtrSize, align)
  1522  	}
  1523  	p := persistent.base.add(persistent.off)
  1524  	persistent.off += size
  1525  	releasem(mp)
  1526  	if persistent == &globalAlloc.persistentAlloc {
  1527  		unlock(&globalAlloc.mutex)
  1528  	}
  1529  
  1530  	if sysStat != &memstats.other_sys {
  1531  		sysStat.add(int64(size))
  1532  		memstats.other_sys.add(-int64(size))
  1533  	}
  1534  	return p
  1535  }
  1536  
  1537  // inPersistentAlloc reports whether p points to memory allocated by
  1538  // persistentalloc. This must be nosplit because it is called by the
  1539  // cgo checker code, which is called by the write barrier code.
  1540  //
  1541  //go:nosplit
  1542  func inPersistentAlloc(p uintptr) bool {
  1543  	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  1544  	for chunk != 0 {
  1545  		if p >= chunk && p < chunk+persistentChunkSize {
  1546  			return true
  1547  		}
  1548  		chunk = *(*uintptr)(unsafe.Pointer(chunk))
  1549  	}
  1550  	return false
  1551  }
  1552  
  1553  // linearAlloc is a simple linear allocator that pre-reserves a region
  1554  // of memory and then optionally maps that region into the Ready state
  1555  // as needed.
  1556  //
  1557  // The caller is responsible for locking.
  1558  type linearAlloc struct {
  1559  	next   uintptr // next free byte
  1560  	mapped uintptr // one byte past end of mapped space
  1561  	end    uintptr // end of reserved space
  1562  
  1563  	mapMemory bool // transition memory from Reserved to Ready if true
  1564  }
  1565  
  1566  func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
  1567  	if base+size < base {
  1568  		// Chop off the last byte. The runtime isn't prepared
  1569  		// to deal with situations where the bounds could overflow.
  1570  		// Leave that memory reserved, though, so we don't map it
  1571  		// later.
  1572  		size -= 1
  1573  	}
  1574  	l.next, l.mapped = base, base
  1575  	l.end = base + size
  1576  	l.mapMemory = mapMemory
  1577  }
  1578  
  1579  func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1580  	p := alignUp(l.next, align)
  1581  	if p+size > l.end {
  1582  		return nil
  1583  	}
  1584  	l.next = p + size
  1585  	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
  1586  		if l.mapMemory {
  1587  			// Transition from Reserved to Prepared to Ready.
  1588  			n := pEnd - l.mapped
  1589  			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
  1590  			sysUsed(unsafe.Pointer(l.mapped), n, n)
  1591  		}
  1592  		l.mapped = pEnd
  1593  	}
  1594  	return unsafe.Pointer(p)
  1595  }
  1596  
  1597  // notInHeap is off-heap memory allocated by a lower-level allocator
  1598  // like sysAlloc or persistentAlloc.
  1599  //
  1600  // In general, it's better to use real types which embed
  1601  // runtime/internal/sys.NotInHeap, but this serves as a generic type
  1602  // for situations where that isn't possible (like in the allocators).
  1603  //
  1604  // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  1605  type notInHeap struct{ _ sys.NotInHeap }
  1606  
  1607  func (p *notInHeap) add(bytes uintptr) *notInHeap {
  1608  	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  1609  }
  1610  
  1611  // computeRZlog computes the size of the redzone.
  1612  // Refer to the implementation of the compiler-rt.
  1613  func computeRZlog(userSize uintptr) uintptr {
  1614  	switch {
  1615  	case userSize <= (64 - 16):
  1616  		return 16 << 0
  1617  	case userSize <= (128 - 32):
  1618  		return 16 << 1
  1619  	case userSize <= (512 - 64):
  1620  		return 16 << 2
  1621  	case userSize <= (4096 - 128):
  1622  		return 16 << 3
  1623  	case userSize <= (1<<14)-256:
  1624  		return 16 << 4
  1625  	case userSize <= (1<<15)-512:
  1626  		return 16 << 5
  1627  	case userSize <= (1<<16)-1024:
  1628  		return 16 << 6
  1629  	default:
  1630  		return 16 << 7
  1631  	}
  1632  }