github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/runtime/malloc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator.
     6  //
     7  // This was originally based on tcmalloc, but has diverged quite a bit.
     8  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9  
    10  // The main allocator works in runs of pages.
    11  // Small allocation sizes (up to and including 32 kB) are
    12  // rounded to one of about 70 size classes, each of which
    13  // has its own free set of objects of exactly that size.
    14  // Any free page of memory can be split into a set of objects
    15  // of one size class, which are then managed using a free bitmap.
    16  //
    17  // The allocator's data structures are:
    18  //
    19  //	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20  //		used to manage storage used by the allocator.
    21  //	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22  //	mspan: a run of in-use pages managed by the mheap.
    23  //	mcentral: collects all spans of a given size class.
    24  //	mcache: a per-P cache of mspans with free space.
    25  //	mstats: allocation statistics.
    26  //
    27  // Allocating a small object proceeds up a hierarchy of caches:
    28  //
    29  //	1. Round the size up to one of the small size classes
    30  //	   and look in the corresponding mspan in this P's mcache.
    31  //	   Scan the mspan's free bitmap to find a free slot.
    32  //	   If there is a free slot, allocate it.
    33  //	   This can all be done without acquiring a lock.
    34  //
    35  //	2. If the mspan has no free slots, obtain a new mspan
    36  //	   from the mcentral's list of mspans of the required size
    37  //	   class that have free space.
    38  //	   Obtaining a whole span amortizes the cost of locking
    39  //	   the mcentral.
    40  //
    41  //	3. If the mcentral's mspan list is empty, obtain a run
    42  //	   of pages from the mheap to use for the mspan.
    43  //
    44  //	4. If the mheap is empty or has no page runs large enough,
    45  //	   allocate a new group of pages (at least 1MB) from the
    46  //	   operating system. Allocating a large run of pages
    47  //	   amortizes the cost of talking to the operating system.
    48  //
    49  // Sweeping an mspan and freeing objects on it proceeds up a similar
    50  // hierarchy:
    51  //
    52  //	1. If the mspan is being swept in response to allocation, it
    53  //	   is returned to the mcache to satisfy the allocation.
    54  //
    55  //	2. Otherwise, if the mspan still has allocated objects in it,
    56  //	   it is placed on the mcentral free list for the mspan's size
    57  //	   class.
    58  //
    59  //	3. Otherwise, if all objects in the mspan are free, the mspan's
    60  //	   pages are returned to the mheap and the mspan is now dead.
    61  //
    62  // Allocating and freeing a large object uses the mheap
    63  // directly, bypassing the mcache and mcentral.
    64  //
    65  // If mspan.needzero is false, then free object slots in the mspan are
    66  // already zeroed. Otherwise if needzero is true, objects are zeroed as
    67  // they are allocated. There are various benefits to delaying zeroing
    68  // this way:
    69  //
    70  //	1. Stack frame allocation can avoid zeroing altogether.
    71  //
    72  //	2. It exhibits better temporal locality, since the program is
    73  //	   probably about to write to the memory.
    74  //
    75  //	3. We don't zero pages that never get reused.
    76  
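        // To make step 1 of the small-object path concrete: a 40-byte noscan
        // allocation is rounded up to the 48-byte size class and served from
        // that class's mspan in this P's mcache. A minimal sketch of the
        // rounding step (hypothetical helper; the real table lookups happen
        // inline in mallocgc below):
        //
        //	func roundToSizeClass(size uintptr) uintptr {
        //		// Valid for size <= smallSizeMax-8; larger small sizes use
        //		// the coarser size_to_class128 table.
        //		return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
        //	}
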
    77  // Virtual memory layout
    78  //
    79  // The heap consists of a set of arenas, which are 64MB on 64-bit and
    80  // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    81  // aligned to the arena size.
    82  //
    83  // Each arena has an associated heapArena object that stores the
    84  // metadata for that arena: the heap bitmap for all words in the arena
    85  // and the span map for all pages in the arena. heapArena objects are
    86  // themselves allocated off-heap.
    87  //
    88  // Since arenas are aligned, the address space can be viewed as a
    89  // series of arena frames. The arena map (mheap_.arenas) maps from
    90  // arena frame number to *heapArena, or nil for parts of the address
    91  // space not backed by the Go heap. The arena map is structured as a
    92  // two-level array consisting of a "L1" arena map and many "L2" arena
    93  // maps; however, since arenas are large, on many architectures, the
    94  // arena map consists of a single, large L2 map.
    95  //
    96  // The arena map covers the entire possible address space, allowing
    97  // the Go heap to use any part of the address space. The allocator
    98  // attempts to keep arenas contiguous so that large spans (and hence
    99  // large objects) can cross arenas.
   100  
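         // For example, with 48-bit addresses and 64MB arenas on a typical
         // 64-bit platform, looking up the heap metadata for an address p is,
         // conceptually (a sketch of the arenaIndex lookup defined elsewhere
         // in the runtime, in mheap.go):
         //
         //	ai := arenaIdx((p + arenaBaseOffset) / heapArenaBytes)
         //	ha := mheap_.arenas[ai.l1()][ai.l2()] // *heapArena, or nil if p is
         //	                                      // not backed by the Go heap
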
   101  package runtime
   102  
   103  import (
   104  	"internal/goarch"
   105  	"internal/goos"
   106  	"runtime/internal/atomic"
   107  	"runtime/internal/math"
   108  	"runtime/internal/sys"
   109  	"unsafe"
   110  )
   111  
   112  const (
   113  	debugMalloc = false
   114  
   115  	maxTinySize   = _TinySize
   116  	tinySizeClass = _TinySizeClass
   117  	maxSmallSize  = _MaxSmallSize
   118  
   119  	pageShift = _PageShift
   120  	pageSize  = _PageSize
   121  	pageMask  = _PageMask
   122  	// By construction, single page spans of the smallest object class
   123  	// have the most objects per span.
   124  	maxObjsPerSpan = pageSize / 8
   125  
   126  	concurrentSweep = _ConcurrentSweep
   127  
   128  	_PageSize = 1 << _PageShift
   129  	_PageMask = _PageSize - 1
   130  
   131  	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
   132  	_64bit = 1 << (^uintptr(0) >> 63) / 2
   133  
   134  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   135  	_TinySize      = 16
   136  	_TinySizeClass = int8(2)
   137  
   138  	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   139  
   140  	// Per-P, per order stack segment cache size.
   141  	_StackCacheSize = 32 * 1024
   142  
   143  	// Number of orders that get caching. Order 0 is FixedStack
   144  	// and each successive order is twice as large.
   145  	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   146  	// will be allocated directly.
   147  	// Since FixedStack is different on different systems, we
   148  	// must vary NumStackOrders to keep the same maximum cached size.
   149  	//   OS               | FixedStack | NumStackOrders
   150  	//   -----------------+------------+---------------
   151  	//   linux/darwin/bsd | 2KB        | 4
   152  	//   windows/32       | 4KB        | 3
   153  	//   windows/64       | 8KB        | 2
   154  	//   plan9            | 4KB        | 3
   155  	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
   156  
   157  	// heapAddrBits is the number of bits in a heap address. On
   158  	// amd64, addresses are sign-extended beyond heapAddrBits. On
   159  	// other arches, they are zero-extended.
   160  	//
   161  	// On most 64-bit platforms, we limit this to 48 bits based on a
   162  	// combination of hardware and OS limitations.
   163  	//
   164  	// amd64 hardware limits addresses to 48 bits, sign-extended
   165  	// to 64 bits. Addresses where the top 16 bits are not either
   166  	// all 0 or all 1 are "non-canonical" and invalid. Because of
   167  	// these "negative" addresses, we offset addresses by 1<<47
   168  	// (arenaBaseOffset) on amd64 before computing indexes into
   169  	// the heap arenas index. In 2017, amd64 hardware added
   170  	// support for 57 bit addresses; however, currently only Linux
   171  	// supports this extension and the kernel will never choose an
   172  	// address above 1<<47 unless mmap is called with a hint
   173  	// address above 1<<47 (which we never do).
   174  	//
   175  	// arm64 hardware (as of ARMv8) limits user addresses to 48
   176  	// bits, in the range [0, 1<<48).
   177  	//
   178  	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   179  	// in hardware. On Linux, Go leans on stricter OS limits. Based
   180  	// on Linux's processor.h, the user address space is limited as
   181  	// follows on 64-bit architectures:
   182  	//
   183  	// Architecture  Name              Maximum Value (exclusive)
   184  	// ---------------------------------------------------------------------
   185  	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   186  	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   187  	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   188  	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   189  	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   190  	//
   191  	// These limits may increase over time, but are currently at
   192  	// most 48 bits except on s390x. On all architectures, Linux
   193  	// starts placing mmap'd regions at addresses that are
   194  	// significantly below 48 bits, so even if it's possible to
   195  	// exceed Go's 48 bit limit, it's extremely unlikely in
   196  	// practice.
   197  	//
   198  	// On 32-bit platforms, we accept the full 32-bit address
   199  	// space because doing so is cheap.
   200  	// mips32 only has access to the low 2GB of virtual memory, so
   201  	// we further limit it to 31 bits.
   202  	//
   203  	// On ios/arm64, although 64-bit pointers are presumably
   204  	// available, pointers are truncated to 33 bits in iOS <14.
   205  	// Furthermore, only the top 4 GiB of the address space are
   206  	// actually available to the application. In iOS >=14, more
   207  	// of the address space is available, and the OS can now
   208  	// provide addresses outside of those 33 bits. Pick 40 bits
   209  	// as a reasonable balance between address space usage by the
   210  	// page allocator, and flexibility for what mmap'd regions
   211  	// we'll accept for the heap. We can't just move to the full
   212  	// 48 bits because this uses too much address space for older
   213  	// iOS versions.
   214  	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
   215  	// to a 48-bit address space like every other arm64 platform.
   216  	//
   217  	// WebAssembly currently has a limit of 4GB linear memory.
   218  	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
   219  
   220  	// maxAlloc is the maximum size of an allocation. On 64-bit,
   221  	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   222  	// 32-bit, however, this is one less than 1<<32 because the
   223  	// number of bytes in the address space doesn't actually fit
   224  	// in a uintptr.
   225  	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   226  
   227  	// The number of bits in a heap address, the size of heap
   228  	// arenas, and the L1 and L2 arena map sizes are related by
   229  	//
   230  	//   (1 << addr bits) = arena size * L1 entries * L2 entries
   231  	//
   232  	// Currently, we balance these as follows:
   233  	//
   234  	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   235  	// --------------  ---------  ----------  ----------  -----------
   236  	//       */64-bit         48        64MB           1    4M (32MB)
   237  	// windows/64-bit         48         4MB          64    1M  (8MB)
   238  	//      ios/arm64         33         4MB           1  2048  (8KB)
   239  	//       */32-bit         32         4MB           1  1024  (4KB)
   240  	//     */mips(le)         31         4MB           1   512  (2KB)
   241  
   242  	// heapArenaBytes is the size of a heap arena. The heap
   243  	// consists of mappings of size heapArenaBytes, aligned to
   244  	// heapArenaBytes. The initial heap mapping is one arena.
   245  	//
   246  	// This is currently 64MB on 64-bit non-Windows and 4MB on
   247  	// 32-bit and on Windows. We use smaller arenas on Windows
   248  	// because all committed memory is charged to the process,
   249  	// even if it's not touched. Hence, for processes with small
   250  	// heaps, the mapped arena space needs to be commensurate.
   251  	// This is particularly important with the race detector,
   252  	// since it significantly amplifies the cost of committed
   253  	// memory.
   254  	heapArenaBytes = 1 << logHeapArenaBytes
   255  
   256  	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   257  	// prefer using heapArenaBytes where possible (we need the
   258  	// constant to compute some other constants).
   259  	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
   260  
   261  	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
   262  	heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
   263  
   264  	pagesPerArena = heapArenaBytes / pageSize
   265  
   266  	// arenaL1Bits is the number of bits of the arena number
   267  	// covered by the first level arena map.
   268  	//
   269  	// This number should be small, since the first level arena
   270  	// map requires PtrSize*(1<<arenaL1Bits) of space in the
   271  	// binary's BSS. It can be zero, in which case the first level
   272  	// index is effectively unused. There is a performance benefit
   273  	// to this, since the generated code can be more efficient,
    274  	// but it comes at the cost of having a large L2 mapping.
   275  	//
   276  	// We use the L1 map on 64-bit Windows because the arena size
   277  	// is small, but the address space is still 48 bits, and
   278  	// there's a high cost to having a large L2.
   279  	arenaL1Bits = 6 * (_64bit * goos.IsWindows)
   280  
   281  	// arenaL2Bits is the number of bits of the arena number
   282  	// covered by the second level arena index.
   283  	//
   284  	// The size of each arena map allocation is proportional to
   285  	// 1<<arenaL2Bits, so it's important that this not be too
   286  	// large. 48 bits leads to 32MB arena index allocations, which
   287  	// is about the practical threshold.
   288  	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   289  
   290  	// arenaL1Shift is the number of bits to shift an arena frame
   291  	// number by to compute an index into the first level arena map.
   292  	arenaL1Shift = arenaL2Bits
   293  
   294  	// arenaBits is the total bits in a combined arena map index.
   295  	// This is split between the index into the L1 arena map and
   296  	// the L2 arena map.
   297  	arenaBits = arenaL1Bits + arenaL2Bits
   298  
   299  	// arenaBaseOffset is the pointer value that corresponds to
   300  	// index 0 in the heap arena map.
   301  	//
   302  	// On amd64, the address space is 48 bits, sign extended to 64
   303  	// bits. This offset lets us handle "negative" addresses (or
   304  	// high addresses if viewed as unsigned).
   305  	//
    306  // On aix/ppc64, this offset allows keeping heapAddrBits at 48.
    307  // Otherwise, it would have to be 60 to handle mmap addresses
    308  // (in the range 0x0a00000000000000 - 0x0affffffffffffff). But in that
    309  // case, the memory reserved in (s *pageAlloc).init for chunks
    310  // causes significant slowdowns.
   311  	//
   312  	// On other platforms, the user address space is contiguous
   313  	// and starts at 0, so no offset is necessary.
   314  	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
   315  	// A typed version of this constant that will make it into DWARF (for viewcore).
   316  	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
   317  
   318  	// Max number of threads to run garbage collection.
   319  	// 2, 3, and 4 are all plausible maximums depending
   320  	// on the hardware details of the machine. The garbage
   321  	// collector scales well to 32 cpus.
   322  	_MaxGcproc = 32
   323  
   324  	// minLegalPointer is the smallest possible legal pointer.
   325  	// This is the smallest possible architectural page size,
   326  	// since we assume that the first page is never mapped.
   327  	//
   328  	// This should agree with minZeroPage in the compiler.
   329  	minLegalPointer uintptr = 4096
   330  )
   331  
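         // _arenaLayoutCheck is an illustrative compile-time identity (not part
         // of the original source): it evaluates to 0 exactly when the relation
         // documented above holds, i.e.
         // (1 << heapAddrBits) == arena size * L1 entries * L2 entries.
         const _arenaLayoutCheck = (1<<heapAddrBits)/(heapArenaBytes*(1<<arenaL1Bits)*(1<<arenaL2Bits)) - 1
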
   332  // physPageSize is the size in bytes of the OS's physical pages.
   333  // Mapping and unmapping operations must be done at multiples of
   334  // physPageSize.
   335  //
   336  // This must be set by the OS init code (typically in osinit) before
   337  // mallocinit.
   338  var physPageSize uintptr
   339  
   340  // physHugePageSize is the size in bytes of the OS's default physical huge
   341  // page size whose allocation is opaque to the application. It is assumed
   342  // and verified to be a power of two.
   343  //
   344  // If set, this must be set by the OS init code (typically in osinit) before
   345  // mallocinit. However, setting it at all is optional, and leaving the default
   346  // value is always safe (though potentially less efficient).
   347  //
   348  // Since physHugePageSize is always assumed to be a power of two,
   349  // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   350  // The purpose of physHugePageShift is to avoid doing divisions in
   351  // performance critical functions.
   352  var (
   353  	physHugePageSize  uintptr
   354  	physHugePageShift uint
   355  )
   356  
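         // alignDownHugePage is a hypothetical helper (not in the original
         // source) illustrating why physHugePageShift exists: it rounds an
         // address down to a huge page boundary using a shift instead of a
         // division (assuming physHugePageSize != 0).
         func alignDownHugePage(addr uintptr) uintptr {
         	// Equivalent to addr - addr%physHugePageSize, but cheaper.
         	return (addr >> physHugePageShift) << physHugePageShift
         }
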
   357  // OS memory management abstraction layer
   358  //
   359  // Regions of the address space managed by the runtime may be in one of four
   360  // states at any given time:
   361  // 1) None - Unreserved and unmapped, the default state of any region.
   362  // 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
   363  //               Does not count against the process' memory footprint.
   364  // 3) Prepared - Reserved, intended not to be backed by physical memory (though
   365  //               an OS may implement this lazily). Can transition efficiently to
   366  //               Ready. Accessing memory in such a region is undefined (may
   367  //               fault, may give back unexpected zeroes, etc.).
   368  // 4) Ready - may be accessed safely.
   369  //
   370  // This set of states is more than is strictly necessary to support all the
   371  // currently supported platforms. One could get by with just None, Reserved, and
   372  // Ready. However, the Prepared state gives us flexibility for performance
   373  // purposes. For example, on POSIX-y operating systems, Reserved is usually a
   374  // private anonymous mmap'd region with PROT_NONE set, and to transition
    375  // to Ready would require setting PROT_READ|PROT_WRITE. However, the
    376  // underspecification of Prepared lets us use just MADV_FREE to transition from
    377  // Ready to Prepared. Thus, with the Prepared state we can set the permission
    378  // bits just once early on, and we can efficiently tell the OS that it's free
    379  // to take pages away from us when we don't strictly need them.
   380  //
   381  // For each OS there is a common set of helpers defined that transition
   382  // memory regions between these states. The helpers are as follows:
   383  //
   384  // sysAlloc transitions an OS-chosen region of memory from None to Ready.
   385  // More specifically, it obtains a large chunk of zeroed memory from the
   386  // operating system, typically on the order of a hundred kilobytes
   387  // or a megabyte. This memory is always immediately available for use.
   388  //
   389  // sysFree transitions a memory region from any state to None. Therefore, it
   390  // returns memory unconditionally. It is used if an out-of-memory error has been
   391  // detected midway through an allocation or to carve out an aligned section of
    392  // the address space. sysFree may be a no-op only if sysReserve always
   393  // returns a memory region aligned to the heap allocator's alignment
   394  // restrictions.
   395  //
   396  // sysReserve transitions a memory region from None to Reserved. It reserves
   397  // address space in such a way that it would cause a fatal fault upon access
   398  // (either via permissions or not committing the memory). Such a reservation is
   399  // thus never backed by physical memory.
   400  // If the pointer passed to it is non-nil, the caller wants the
   401  // reservation there, but sysReserve can still choose another
   402  // location if that one is unavailable.
   403  // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
   404  // may use larger alignment, so the caller must be careful to realign the
   405  // memory obtained by sysReserve.
   406  //
   407  // sysMap transitions a memory region from Reserved to Prepared. It ensures the
   408  // memory region can be efficiently transitioned to Ready.
   409  //
   410  // sysUsed transitions a memory region from Prepared to Ready. It notifies the
   411  // operating system that the memory region is needed and ensures that the region
   412  // may be safely accessed. This is typically a no-op on systems that don't have
   413  // an explicit commit step and hard over-commit limits, but is critical on
   414  // Windows, for example.
   415  //
   416  // sysUnused transitions a memory region from Ready to Prepared. It notifies the
   417  // operating system that the physical pages backing this memory region are no
   418  // longer needed and can be reused for other purposes. The contents of a
   419  // sysUnused memory region are considered forfeit and the region must not be
   420  // accessed again until sysUsed is called.
   421  //
   422  // sysFault transitions a memory region from Ready or Prepared to Reserved. It
   423  // marks a region such that it will always fault if accessed. Used only for
   424  // debugging the runtime.
   425  
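         // In summary (a sketch assembled from the descriptions above), the
         // helpers move regions between the four states as follows:
         //
         //	sysAlloc:    None           -> Ready
         //	sysReserve:  None           -> Reserved
         //	sysMap:      Reserved       -> Prepared
         //	sysUsed:     Prepared       -> Ready
         //	sysUnused:   Ready          -> Prepared
         //	sysFault:    Ready/Prepared -> Reserved
         //	sysFree:     any            -> None
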
   426  func mallocinit() {
   427  	if class_to_size[_TinySizeClass] != _TinySize {
   428  		throw("bad TinySizeClass")
   429  	}
   430  
   431  	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
   432  		// heapBits expects modular arithmetic on bitmap
   433  		// addresses to work.
   434  		throw("heapArenaBitmapBytes not a power of 2")
   435  	}
   436  
   437  	// Copy class sizes out for statistics table.
   438  	for i := range class_to_size {
   439  		memstats.by_size[i].size = uint32(class_to_size[i])
   440  	}
   441  
   442  	// Check physPageSize.
   443  	if physPageSize == 0 {
   444  		// The OS init code failed to fetch the physical page size.
   445  		throw("failed to get system page size")
   446  	}
   447  	if physPageSize > maxPhysPageSize {
   448  		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
   449  		throw("bad system page size")
   450  	}
   451  	if physPageSize < minPhysPageSize {
   452  		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   453  		throw("bad system page size")
   454  	}
   455  	if physPageSize&(physPageSize-1) != 0 {
   456  		print("system page size (", physPageSize, ") must be a power of 2\n")
   457  		throw("bad system page size")
   458  	}
   459  	if physHugePageSize&(physHugePageSize-1) != 0 {
   460  		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   461  		throw("bad system huge page size")
   462  	}
   463  	if physHugePageSize > maxPhysHugePageSize {
   464  		// physHugePageSize is greater than the maximum supported huge page size.
   465  		// Don't throw here, like in the other cases, since a system configured
    466  		// in this way isn't wrong, we just don't have the code to support it.
   467  		// Instead, silently set the huge page size to zero.
   468  		physHugePageSize = 0
   469  	}
   470  	if physHugePageSize != 0 {
   471  		// Since physHugePageSize is a power of 2, it suffices to increase
   472  		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   473  		for 1<<physHugePageShift != physHugePageSize {
   474  			physHugePageShift++
   475  		}
   476  	}
   477  	if pagesPerArena%pagesPerSpanRoot != 0 {
   478  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
   479  		throw("bad pagesPerSpanRoot")
   480  	}
   481  	if pagesPerArena%pagesPerReclaimerChunk != 0 {
   482  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
   483  		throw("bad pagesPerReclaimerChunk")
   484  	}
   485  
   486  	// Initialize the heap.
   487  	mheap_.init()
   488  	mcache0 = allocmcache()
   489  	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
   490  	lockInit(&proflock, lockRankProf)
   491  	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
   492  
   493  	// Create initial arena growth hints.
   494  	if goarch.PtrSize == 8 {
   495  		// On a 64-bit machine, we pick the following hints
   496  		// because:
   497  		//
   498  		// 1. Starting from the middle of the address space
   499  		// makes it easier to grow out a contiguous range
   500  		// without running in to some other mapping.
   501  		//
   502  		// 2. This makes Go heap addresses more easily
   503  		// recognizable when debugging.
   504  		//
   505  		// 3. Stack scanning in gccgo is still conservative,
   506  		// so it's important that addresses be distinguishable
   507  		// from other data.
   508  		//
   509  		// Starting at 0x00c0 means that the valid memory addresses
    510  // will begin with 0x00c0, 0x00c1, ...
   511  		// In little-endian, that's c0 00, c1 00, ... None of those are valid
   512  		// UTF-8 sequences, and they are otherwise as far away from
   513  		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   514  		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   515  		// on OS X during thread allocations.  0x00c0 causes conflicts with
   516  		// AddressSanitizer which reserves all memory up to 0x0100.
   517  		// These choices reduce the odds of a conservative garbage collector
   518  		// not collecting memory because some non-pointer block of memory
   519  		// had a bit pattern that matched a memory address.
   520  		//
   521  		// However, on arm64, we ignore all this advice above and slam the
   522  		// allocation at 0x40 << 32 because when using 4k pages with 3-level
    523  // translation buffers, the user address space is limited to 39 bits.
    524  // On ios/arm64, the address space is even smaller.
    525  //
    526  // On AIX, mmap starts at 0x0A00000000000000 for 64-bit
    527  // processes.
   528  		for i := 0x7f; i >= 0; i-- {
   529  			var p uintptr
   530  			switch {
   531  			case raceenabled:
   532  				// The TSAN runtime requires the heap
   533  				// to be in the range [0x00c000000000,
   534  				// 0x00e000000000).
   535  				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   536  				if p >= uintptrMask&0x00e000000000 {
   537  					continue
   538  				}
   539  			case GOARCH == "arm64" && GOOS == "ios":
   540  				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   541  			case GOARCH == "arm64":
   542  				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   543  			case GOOS == "aix":
   544  				if i == 0 {
   545  					// We don't use addresses directly after 0x0A00000000000000
    546  // to avoid collisions with other mmaps done by non-Go programs.
   547  					continue
   548  				}
   549  				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   550  			default:
   551  				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   552  			}
   553  			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   554  			hint.addr = p
   555  			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   556  		}
   557  	} else {
   558  		// On a 32-bit machine, we're much more concerned
   559  		// about keeping the usable heap contiguous.
   560  		// Hence:
   561  		//
   562  		// 1. We reserve space for all heapArenas up front so
   563  		// they don't get interleaved with the heap. They're
   564  		// ~258MB, so this isn't too bad. (We could reserve a
   565  		// smaller amount of space up front if this is a
   566  		// problem.)
   567  		//
   568  		// 2. We hint the heap to start right above the end of
   569  		// the binary so we have the best chance of keeping it
   570  		// contiguous.
   571  		//
   572  		// 3. We try to stake out a reasonably large initial
   573  		// heap reservation.
   574  
   575  		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   576  		meta := uintptr(sysReserve(nil, arenaMetaSize))
   577  		if meta != 0 {
   578  			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
   579  		}
   580  
   581  		// We want to start the arena low, but if we're linked
   582  		// against C code, it's possible global constructors
   583  		// have called malloc and adjusted the process' brk.
   584  		// Query the brk so we can avoid trying to map the
   585  		// region over it (which will cause the kernel to put
   586  		// the region somewhere else, likely at a high
   587  		// address).
   588  		procBrk := sbrk0()
   589  
   590  		// If we ask for the end of the data segment but the
   591  		// operating system requires a little more space
   592  		// before we can start allocating, it will give out a
   593  		// slightly higher pointer. Except QEMU, which is
   594  		// buggy, as usual: it won't adjust the pointer
   595  		// upward. So adjust it upward a little bit ourselves:
   596  		// 1/4 MB to get away from the running binary image.
   597  		p := firstmoduledata.end
   598  		if p < procBrk {
   599  			p = procBrk
   600  		}
   601  		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   602  			p = mheap_.heapArenaAlloc.end
   603  		}
   604  		p = alignUp(p+(256<<10), heapArenaBytes)
   605  		// Because we're worried about fragmentation on
   606  		// 32-bit, we try to make a large initial reservation.
   607  		arenaSizes := []uintptr{
   608  			512 << 20,
   609  			256 << 20,
   610  			128 << 20,
   611  		}
   612  		for _, arenaSize := range arenaSizes {
   613  			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
   614  			if a != nil {
   615  				mheap_.arena.init(uintptr(a), size, false)
   616  				p = mheap_.arena.end // For hint below
   617  				break
   618  			}
   619  		}
   620  		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   621  		hint.addr = p
   622  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   623  	}
   624  }
   625  
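         // defaultArenaHints is an illustrative, hypothetical helper (not part
         // of the original source) listing the default 64-bit hint addresses
         // chosen above, in the order the runtime tries them. The loop in
         // mallocinit prepends while counting i down, so i == 0
         // (0x00c000000000) ends up first in the hint list.
         func defaultArenaHints() []uintptr {
         	hints := make([]uintptr, 0, 0x80)
         	for i := 0; i <= 0x7f; i++ {
         		hints = append(hints, uintptr(i)<<40|uintptrMask&(0x00c0<<32))
         	}
         	return hints
         }
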
   626  // sysAlloc allocates heap arena space for at least n bytes. The
   627  // returned pointer is always heapArenaBytes-aligned and backed by
   628  // h.arenas metadata. The returned size is always a multiple of
   629  // heapArenaBytes. sysAlloc returns nil on failure.
   630  // There is no corresponding free function.
   631  //
   632  // sysAlloc returns a memory region in the Reserved state. This region must
   633  // be transitioned to Prepared and then Ready before use.
   634  //
   635  // h must be locked.
   636  func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
   637  	assertLockHeld(&h.lock)
   638  
   639  	n = alignUp(n, heapArenaBytes)
   640  
   641  	// First, try the arena pre-reservation.
   642  	v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
   643  	if v != nil {
   644  		size = n
   645  		goto mapped
   646  	}
   647  
   648  	// Try to grow the heap at a hint address.
   649  	for h.arenaHints != nil {
   650  		hint := h.arenaHints
   651  		p := hint.addr
   652  		if hint.down {
   653  			p -= n
   654  		}
   655  		if p+n < p {
   656  			// We can't use this, so don't ask.
   657  			v = nil
   658  		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   659  			// Outside addressable heap. Can't use.
   660  			v = nil
   661  		} else {
   662  			v = sysReserve(unsafe.Pointer(p), n)
   663  		}
   664  		if p == uintptr(v) {
   665  			// Success. Update the hint.
   666  			if !hint.down {
   667  				p += n
   668  			}
   669  			hint.addr = p
   670  			size = n
   671  			break
   672  		}
   673  		// Failed. Discard this hint and try the next.
   674  		//
   675  		// TODO: This would be cleaner if sysReserve could be
   676  		// told to only return the requested address. In
   677  		// particular, this is already how Windows behaves, so
   678  		// it would simplify things there.
   679  		if v != nil {
   680  			sysFree(v, n, nil)
   681  		}
   682  		h.arenaHints = hint.next
   683  		h.arenaHintAlloc.free(unsafe.Pointer(hint))
   684  	}
   685  
   686  	if size == 0 {
   687  		if raceenabled {
   688  			// The race detector assumes the heap lives in
   689  			// [0x00c000000000, 0x00e000000000), but we
   690  			// just ran out of hints in this region. Give
   691  			// a nice failure.
   692  			throw("too many address space collisions for -race mode")
   693  		}
   694  
   695  		// All of the hints failed, so we'll take any
   696  		// (sufficiently aligned) address the kernel will give
   697  		// us.
   698  		v, size = sysReserveAligned(nil, n, heapArenaBytes)
   699  		if v == nil {
   700  			return nil, 0
   701  		}
   702  
   703  		// Create new hints for extending this region.
   704  		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   705  		hint.addr, hint.down = uintptr(v), true
   706  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   707  		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   708  		hint.addr = uintptr(v) + size
   709  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   710  	}
   711  
   712  	// Check for bad pointers or pointers we can't use.
   713  	{
   714  		var bad string
   715  		p := uintptr(v)
   716  		if p+size < p {
   717  			bad = "region exceeds uintptr range"
   718  		} else if arenaIndex(p) >= 1<<arenaBits {
   719  			bad = "base outside usable address space"
   720  		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   721  			bad = "end outside usable address space"
   722  		}
   723  		if bad != "" {
   724  			// This should be impossible on most architectures,
   725  			// but it would be really confusing to debug.
   726  			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   727  			throw("memory reservation exceeds address space limit")
   728  		}
   729  	}
   730  
   731  	if uintptr(v)&(heapArenaBytes-1) != 0 {
   732  		throw("misrounded allocation in sysAlloc")
   733  	}
   734  
   735  mapped:
   736  	// Create arena metadata.
   737  	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   738  		l2 := h.arenas[ri.l1()]
   739  		if l2 == nil {
   740  			// Allocate an L2 arena map.
   741  			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
   742  			if l2 == nil {
   743  				throw("out of memory allocating heap arena map")
   744  			}
   745  			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   746  		}
   747  
   748  		if l2[ri.l2()] != nil {
   749  			throw("arena already initialized")
   750  		}
   751  		var r *heapArena
   752  		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   753  		if r == nil {
   754  			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   755  			if r == nil {
   756  				throw("out of memory allocating heap arena metadata")
   757  			}
   758  		}
   759  
   760  		// Add the arena to the arenas list.
   761  		if len(h.allArenas) == cap(h.allArenas) {
   762  			size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
   763  			if size == 0 {
   764  				size = physPageSize
   765  			}
   766  			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
   767  			if newArray == nil {
   768  				throw("out of memory allocating allArenas")
   769  			}
   770  			oldSlice := h.allArenas
   771  			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
   772  			copy(h.allArenas, oldSlice)
   773  			// Do not free the old backing array because
   774  			// there may be concurrent readers. Since we
   775  			// double the array each time, this can lead
   776  			// to at most 2x waste.
   777  		}
   778  		h.allArenas = h.allArenas[:len(h.allArenas)+1]
   779  		h.allArenas[len(h.allArenas)-1] = ri
   780  
   781  		// Store atomically just in case an object from the
   782  		// new heap arena becomes visible before the heap lock
   783  		// is released (which shouldn't happen, but there's
   784  		// little downside to this).
   785  		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   786  	}
   787  
   788  	// Tell the race detector about the new heap memory.
   789  	if raceenabled {
   790  		racemapshadow(v, size)
   791  	}
   792  
   793  	return
   794  }
   795  
   796  // sysReserveAligned is like sysReserve, but the returned pointer is
   797  // aligned to align bytes. It may reserve either n or n+align bytes,
   798  // so it returns the size that was reserved.
   799  func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
   800  	// Since the alignment is rather large in uses of this
   801  	// function, we're not likely to get it by chance, so we ask
   802  	// for a larger region and remove the parts we don't need.
   803  	retries := 0
   804  retry:
   805  	p := uintptr(sysReserve(v, size+align))
   806  	switch {
   807  	case p == 0:
   808  		return nil, 0
   809  	case p&(align-1) == 0:
   810  		// We got lucky and got an aligned region, so we can
   811  		// use the whole thing.
   812  		return unsafe.Pointer(p), size + align
   813  	case GOOS == "windows":
   814  		// On Windows we can't release pieces of a
   815  		// reservation, so we release the whole thing and
   816  		// re-reserve the aligned sub-region. This may race,
   817  		// so we may have to try again.
   818  		sysFree(unsafe.Pointer(p), size+align, nil)
   819  		p = alignUp(p, align)
   820  		p2 := sysReserve(unsafe.Pointer(p), size)
   821  		if p != uintptr(p2) {
   822  			// Must have raced. Try again.
   823  			sysFree(p2, size, nil)
   824  			if retries++; retries == 100 {
   825  				throw("failed to allocate aligned heap memory; too many retries")
   826  			}
   827  			goto retry
   828  		}
   829  		// Success.
   830  		return p2, size
   831  	default:
   832  		// Trim off the unaligned parts.
   833  		pAligned := alignUp(p, align)
   834  		sysFree(unsafe.Pointer(p), pAligned-p, nil)
   835  		end := pAligned + size
   836  		endLen := (p + size + align) - end
   837  		if endLen > 0 {
   838  			sysFree(unsafe.Pointer(end), endLen, nil)
   839  		}
   840  		return unsafe.Pointer(pAligned), size
   841  	}
   842  }
   843  
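         // For reference, alignUp (used above to trim the over-reservation)
         // rounds up to a power-of-two boundary; a sketch of its behavior (the
         // real definition lives elsewhere in the runtime):
         //
         //	alignUp(p, align) == (p + align - 1) &^ (align - 1)
         //
         // For example, alignUp(0x7003, 0x1000) == 0x8000, so the default case
         // frees [p, 0x8000) and keeps the aligned region starting at 0x8000.
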
   844  // base address for all 0-byte allocations
   845  var zerobase uintptr
   846  
   847  // nextFreeFast returns the next free object if one is quickly available.
   848  // Otherwise it returns 0.
   849  func nextFreeFast(s *mspan) gclinkptr {
   850  	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
   851  	if theBit < 64 {
   852  		result := s.freeindex + uintptr(theBit)
   853  		if result < s.nelems {
   854  			freeidx := result + 1
   855  			if freeidx%64 == 0 && freeidx != s.nelems {
   856  				return 0
   857  			}
   858  			s.allocCache >>= uint(theBit + 1)
   859  			s.freeindex = freeidx
   860  			s.allocCount++
   861  			return gclinkptr(result*s.elemsize + s.base())
   862  		}
   863  	}
   864  	return 0
   865  }
   866  
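         // For example, if s.allocCache == 0b10100, sys.Ctz64 returns 2: the
         // object at s.freeindex+2 is the next free slot. A sketch of the
         // cache update performed above:
         //
         //	cache := uint64(0b10100)
         //	bit := sys.Ctz64(cache) // 2: bits 0 and 1 are already allocated
         //	cache >>= uint(bit + 1) // 0b10: the consumed bit is shifted out too
         //
         // so the next lookup starts just past the object handed out.
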
   867  // nextFree returns the next free object from the cached span if one is available.
   868  // Otherwise it refills the cache with a span with an available object and
   869  // returns that object along with a flag indicating that this was a heavy
   870  // weight allocation. If it is a heavy weight allocation the caller must
   871  // determine whether a new GC cycle needs to be started or if the GC is active
   872  // whether this goroutine needs to assist the GC.
   873  //
   874  // Must run in a non-preemptible context since otherwise the owner of
   875  // c could change.
   876  func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
   877  	s = c.alloc[spc]
   878  	shouldhelpgc = false
   879  	freeIndex := s.nextFreeIndex()
   880  	if freeIndex == s.nelems {
   881  		// The span is full.
   882  		if uintptr(s.allocCount) != s.nelems {
   883  			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   884  			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   885  		}
   886  		c.refill(spc)
   887  		shouldhelpgc = true
   888  		s = c.alloc[spc]
   889  
   890  		freeIndex = s.nextFreeIndex()
   891  	}
   892  
   893  	if freeIndex >= s.nelems {
   894  		throw("freeIndex is not valid")
   895  	}
   896  
   897  	v = gclinkptr(freeIndex*s.elemsize + s.base())
   898  	s.allocCount++
   899  	if uintptr(s.allocCount) > s.nelems {
   900  		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   901  		throw("s.allocCount > s.nelems")
   902  	}
   903  	return
   904  }
   905  
   906  // Allocate an object of size bytes.
   907  // Small objects are allocated from the per-P cache's free lists.
   908  // Large objects (> 32 kB) are allocated straight from the heap.
   909  func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
   910  	if gcphase == _GCmarktermination {
   911  		throw("mallocgc called with gcphase == _GCmarktermination")
   912  	}
   913  
   914  	if size == 0 {
   915  		return unsafe.Pointer(&zerobase)
   916  	}
   917  	userSize := size
   918  	if asanenabled {
    919  		// In the ASAN runtime library, the malloc() function allocates extra
    920  		// memory, the redzone, around the user-requested memory region, and
    921  		// marks the redzone as unaddressable. We perform the same operation
    922  		// in Go to detect overflows and underflows.
   923  		size += computeRZlog(size)
   924  	}
   925  
   926  	if debug.malloc {
   927  		if debug.sbrk != 0 {
   928  			align := uintptr(16)
   929  			if typ != nil {
   930  				// TODO(austin): This should be just
   931  				//   align = uintptr(typ.align)
   932  				// but that's only 4 on 32-bit platforms,
   933  				// even if there's a uint64 field in typ (see #599).
   934  				// This causes 64-bit atomic accesses to panic.
   935  				// Hence, we use stricter alignment that matches
   936  				// the normal allocator better.
   937  				if size&7 == 0 {
   938  					align = 8
   939  				} else if size&3 == 0 {
   940  					align = 4
   941  				} else if size&1 == 0 {
   942  					align = 2
   943  				} else {
   944  					align = 1
   945  				}
   946  			}
   947  			return persistentalloc(size, align, &memstats.other_sys)
   948  		}
   949  
   950  		if inittrace.active && inittrace.id == getg().goid {
   951  			// Init functions are executed sequentially in a single goroutine.
   952  			inittrace.allocs += 1
   953  		}
   954  	}
   955  
   956  	// assistG is the G to charge for this allocation, or nil if
   957  	// GC is not currently active.
   958  	var assistG *g
   959  	if gcBlackenEnabled != 0 {
   960  		// Charge the current user G for this allocation.
   961  		assistG = getg()
   962  		if assistG.m.curg != nil {
   963  			assistG = assistG.m.curg
   964  		}
   965  		// Charge the allocation against the G. We'll account
   966  		// for internal fragmentation at the end of mallocgc.
   967  		assistG.gcAssistBytes -= int64(size)
   968  
   969  		if assistG.gcAssistBytes < 0 {
   970  			// This G is in debt. Assist the GC to correct
   971  			// this before allocating. This must happen
   972  			// before disabling preemption.
   973  			gcAssistAlloc(assistG)
   974  		}
   975  	}
   976  
   977  	// Set mp.mallocing to keep from being preempted by GC.
   978  	mp := acquirem()
   979  	if mp.mallocing != 0 {
   980  		throw("malloc deadlock")
   981  	}
   982  	if mp.gsignal == getg() {
   983  		throw("malloc during signal")
   984  	}
   985  	mp.mallocing = 1
   986  
   987  	shouldhelpgc := false
   988  	dataSize := userSize
   989  	c := getMCache(mp)
   990  	if c == nil {
   991  		throw("mallocgc called without a P or outside bootstrapping")
   992  	}
   993  	var span *mspan
   994  	var x unsafe.Pointer
   995  	noscan := typ == nil || typ.ptrdata == 0
   996  	// In some cases block zeroing can profitably (for latency reduction purposes)
    997  	// be delayed until preemption is possible; delayedZeroing tracks that state.
   998  	delayedZeroing := false
   999  	if size <= maxSmallSize {
  1000  		if noscan && size < maxTinySize {
  1001  			// Tiny allocator.
  1002  			//
  1003  			// Tiny allocator combines several tiny allocation requests
  1004  			// into a single memory block. The resulting memory block
  1005  			// is freed when all subobjects are unreachable. The subobjects
  1006  			// must be noscan (don't have pointers), this ensures that
  1007  			// the amount of potentially wasted memory is bounded.
  1008  			//
  1009  			// Size of the memory block used for combining (maxTinySize) is tunable.
   1010  			// The current setting is 16 bytes, which gives at most 2x worst-case
   1011  			// memory wastage (when all but one subobject are unreachable).
   1012  			// 8 bytes would result in no wastage at all, but would provide fewer
   1013  			// opportunities for combining.
   1014  			// 32 bytes would provide more opportunities for combining,
   1015  			// but can lead to 4x worst-case wastage.
   1016  			// The best-case saving is 8x regardless of block size.
   1017  			//
   1018  			// Objects obtained from the tiny allocator must not be freed explicitly.
   1019  			// So when an object will be freed explicitly, we ensure that
   1020  			// its size >= maxTinySize.
   1021  			//
   1022  			// SetFinalizer has a special case for objects potentially coming
   1023  			// from the tiny allocator; in such a case, it allows setting finalizers
   1024  			// for an inner byte of a memory block.
   1025  			//
   1026  			// The main targets of the tiny allocator are small strings and
   1027  			// standalone escaping variables. On a json benchmark,
   1028  			// the allocator reduces the number of allocations by ~12% and
   1029  			// reduces heap size by ~20%.
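         			//
         			// For example (a sketch): three successive 5-byte noscan
         			// allocations can share one 16-byte block at offsets 0, 5,
         			// and 10 (an odd size needs no extra alignment), so the
         			// second and third are served entirely by the fast path
         			// below.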
  1030  			off := c.tinyoffset
  1031  			// Align tiny pointer for required (conservative) alignment.
  1032  			if size&7 == 0 {
  1033  				off = alignUp(off, 8)
  1034  			} else if goarch.PtrSize == 4 && size == 12 {
  1035  				// Conservatively align 12-byte objects to 8 bytes on 32-bit
  1036  				// systems so that objects whose first field is a 64-bit
  1037  				// value is aligned to 8 bytes and does not cause a fault on
  1038  				// atomic access. See issue 37262.
  1039  				// TODO(mknyszek): Remove this workaround if/when issue 36606
  1040  				// is resolved.
  1041  				off = alignUp(off, 8)
  1042  			} else if size&3 == 0 {
  1043  				off = alignUp(off, 4)
  1044  			} else if size&1 == 0 {
  1045  				off = alignUp(off, 2)
  1046  			}
  1047  			if off+size <= maxTinySize && c.tiny != 0 {
  1048  				// The object fits into existing tiny block.
  1049  				x = unsafe.Pointer(c.tiny + off)
  1050  				c.tinyoffset = off + size
  1051  				c.tinyAllocs++
  1052  				mp.mallocing = 0
  1053  				releasem(mp)
  1054  				return x
  1055  			}
  1056  			// Allocate a new maxTinySize block.
  1057  			span = c.alloc[tinySpanClass]
  1058  			v := nextFreeFast(span)
  1059  			if v == 0 {
  1060  				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
  1061  			}
  1062  			x = unsafe.Pointer(v)
  1063  			(*[2]uint64)(x)[0] = 0
  1064  			(*[2]uint64)(x)[1] = 0
  1065  			// See if we need to replace the existing tiny block with the new one
  1066  			// based on amount of remaining free space.
  1067  			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
  1068  				// Note: disabled when race detector is on, see comment near end of this function.
  1069  				c.tiny = uintptr(x)
  1070  				c.tinyoffset = size
  1071  			}
  1072  			size = maxTinySize
  1073  		} else {
  1074  			var sizeclass uint8
  1075  			if size <= smallSizeMax-8 {
  1076  				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
  1077  			} else {
  1078  				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
  1079  			}
  1080  			size = uintptr(class_to_size[sizeclass])
  1081  			spc := makeSpanClass(sizeclass, noscan)
  1082  			span = c.alloc[spc]
  1083  			v := nextFreeFast(span)
  1084  			if v == 0 {
  1085  				v, span, shouldhelpgc = c.nextFree(spc)
  1086  			}
  1087  			x = unsafe.Pointer(v)
  1088  			if needzero && span.needzero != 0 {
  1089  				memclrNoHeapPointers(unsafe.Pointer(v), size)
  1090  			}
  1091  		}
  1092  	} else {
  1093  		shouldhelpgc = true
  1094  		// For large allocations, keep track of zeroed state so that
   1095  		// bulk zeroing can happen later in a preemptible context.
  1096  		span = c.allocLarge(size, noscan)
  1097  		span.freeindex = 1
  1098  		span.allocCount = 1
  1099  		size = span.elemsize
  1100  		x = unsafe.Pointer(span.base())
  1101  		if needzero && span.needzero != 0 {
  1102  			if noscan {
  1103  				delayedZeroing = true
  1104  			} else {
  1105  				memclrNoHeapPointers(x, size)
  1106  				// We've in theory cleared almost the whole span here,
  1107  				// and could take the extra step of actually clearing
  1108  				// the whole thing. However, don't. Any GC bits for the
  1109  				// uncleared parts will be zero, and it's just going to
  1110  				// be needzero = 1 once freed anyway.
  1111  			}
  1112  		}
  1113  	}
  1114  
  1115  	var scanSize uintptr
  1116  	if !noscan {
  1117  		heapBitsSetType(uintptr(x), size, dataSize, typ)
  1118  		if dataSize > typ.size {
  1119  			// Array allocation. If there are any
  1120  			// pointers, GC has to scan to the last
  1121  			// element.
  1122  			if typ.ptrdata != 0 {
  1123  				scanSize = dataSize - typ.size + typ.ptrdata
  1124  			}
  1125  		} else {
  1126  			scanSize = typ.ptrdata
  1127  		}
  1128  		c.scanAlloc += scanSize
  1129  	}
  1130  
  1131  	// Ensure that the stores above that initialize x to
  1132  	// type-safe memory and set the heap bits occur before
  1133  	// the caller can make x observable to the garbage
  1134  	// collector. Otherwise, on weakly ordered machines,
  1135  	// the garbage collector could follow a pointer to x,
  1136  	// but see uninitialized memory or stale heap bits.
  1137  	publicationBarrier()
  1138  
  1139  	// Allocate black during GC.
  1140  	// All slots hold nil so no scanning is needed.
  1141  	// This may be racing with GC so do it atomically if there can be
  1142  	// a race marking the bit.
  1143  	if gcphase != _GCoff {
  1144  		gcmarknewobject(span, uintptr(x), size, scanSize)
  1145  	}
  1146  
  1147  	if raceenabled {
  1148  		racemalloc(x, size)
  1149  	}
  1150  
  1151  	if msanenabled {
  1152  		msanmalloc(x, size)
  1153  	}
  1154  
  1155  	if asanenabled {
  1156  		// We should only read/write the memory with the size asked by the user.
  1157  		// The rest of the allocated memory should be poisoned, so that we can report
  1158  		// errors when accessing poisoned memory.
   1159  		// The allocated memory is larger than the requested userSize; it also
   1160  		// includes the redzone and some other padding bytes.
  1161  		rzBeg := unsafe.Add(x, userSize)
  1162  		asanpoison(rzBeg, size-userSize)
  1163  		asanunpoison(x, userSize)
  1164  	}
  1165  
  1166  	if rate := MemProfileRate; rate > 0 {
   1167  		// Note: cache c is only valid while m is acquired; see #47302.
  1168  		if rate != 1 && size < c.nextSample {
  1169  			c.nextSample -= size
  1170  		} else {
  1171  			profilealloc(mp, x, size)
  1172  		}
  1173  	}
  1174  	mp.mallocing = 0
  1175  	releasem(mp)
  1176  
  1177  	// Pointerfree data can be zeroed late in a context where preemption can occur.
  1178  	// x will keep the memory alive.
  1179  	if delayedZeroing {
  1180  		if !noscan {
  1181  			throw("delayed zeroing on data that may contain pointers")
  1182  		}
  1183  		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
  1184  	}
  1185  
  1186  	if debug.malloc {
  1187  		if debug.allocfreetrace != 0 {
  1188  			tracealloc(x, size, typ)
  1189  		}
  1190  
  1191  		if inittrace.active && inittrace.id == getg().goid {
  1192  			// Init functions are executed sequentially in a single goroutine.
  1193  			inittrace.bytes += uint64(size)
  1194  		}
  1195  	}
  1196  
  1197  	if assistG != nil {
  1198  		// Account for internal fragmentation in the assist
  1199  		// debt now that we know it.
  1200  		assistG.gcAssistBytes -= int64(size - dataSize)
  1201  	}
  1202  
  1203  	if shouldhelpgc {
  1204  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1205  			gcStart(t)
  1206  		}
  1207  	}
  1208  
  1209  	if raceenabled && noscan && dataSize < maxTinySize {
  1210  		// Pad tinysize allocations so they are aligned with the end
  1211  		// of the tinyalloc region. This ensures that any arithmetic
  1212  		// that goes off the top end of the object will be detectable
  1213  		// by checkptr (issue 38872).
  1214  		// Note that we disable tinyalloc when raceenabled for this to work.
  1215  		// TODO: This padding is only performed when the race detector
  1216  		// is enabled. It would be nice to enable it if any package
  1217  		// was compiled with checkptr, but there's no easy way to
  1218  		// detect that (especially at compile time).
  1219  		// TODO: enable this padding for all allocations, not just
  1220  		// tinyalloc ones. It's tricky because of pointer maps.
  1221  		// Maybe just all noscan objects?
  1222  		x = add(x, size-dataSize)
  1223  	}
  1224  
  1225  	return x
  1226  }
  1227  
  1228  // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
  1229  // on chunks of the buffer to be zeroed, with opportunities for preemption
   1230  // along the way. memclrNoHeapPointers contains no safepoints and also
   1231  // cannot be preemptively scheduled, so this provides a still-efficient
   1232  // block clear that can be preempted at a reasonable granularity.
  1233  //
  1234  // Use this with care; if the data being cleared is tagged to contain
  1235  // pointers, this allows the GC to run before it is all cleared.
  1236  func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
  1237  	v := uintptr(x)
   1238  	// The chunk size was chosen by benchmarking: 128k is too small, 512k is too large.
  1239  	const chunkBytes = 256 * 1024
  1240  	vsize := v + size
  1241  	for voff := v; voff < vsize; voff = voff + chunkBytes {
  1242  		if getg().preempt {
  1243  			// may hold locks, e.g., profiling
  1244  			goschedguarded()
  1245  		}
   1246  		// Clear min(remaining, chunkBytes) bytes.
  1247  		n := vsize - voff
  1248  		if n > chunkBytes {
  1249  			n = chunkBytes
  1250  		}
  1251  		memclrNoHeapPointers(unsafe.Pointer(voff), n)
  1252  	}
  1253  }
  1254  
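         // For example, clearing a 1 MiB buffer above proceeds as four 256 KiB
         // memclrNoHeapPointers calls, checking getg().preempt before each
         // chunk so a pending preemption request is honored between chunks.
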
   1255  // newobject implements the new builtin.
   1256  // The compiler (both frontend and SSA backend) knows the signature
   1257  // of this function.
  1258  func newobject(typ *_type) unsafe.Pointer {
  1259  	return mallocgc(typ.size, typ, true)
  1260  }
  1261  
  1262  //go:linkname reflect_unsafe_New reflect.unsafe_New
  1263  func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1264  	return mallocgc(typ.size, typ, true)
  1265  }
  1266  
  1267  //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1268  func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1269  	return mallocgc(typ.size, typ, true)
  1270  }
  1271  
  1272  // newarray allocates an array of n elements of type typ.
  1273  func newarray(typ *_type, n int) unsafe.Pointer {
  1274  	if n == 1 {
  1275  		return mallocgc(typ.size, typ, true)
  1276  	}
  1277  	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
  1278  	if overflow || mem > maxAlloc || n < 0 {
  1279  		panic(plainError("runtime: allocation size out of range"))
  1280  	}
  1281  	return mallocgc(mem, typ, true)
  1282  }
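
// Outside the runtime, the overflow guard that math.MulUintptr provides
// above is usually written with math/bits; a minimal sketch (not part of
// this file):
//
//	import "math/bits"
//
//	// checkedSize returns elemSize*n and reports whether the product
//	// overflowed 64 bits.
//	func checkedSize(elemSize, n uint64) (uint64, bool) {
//		hi, lo := bits.Mul64(elemSize, n)
//		return lo, hi != 0 // overflow iff the high word is nonzero
//	}
//
// newarray additionally rejects n < 0 and products larger than maxAlloc.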
  1283  
  1284  //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1285  func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1286  	return newarray(typ, n)
  1287  }
  1288  
  1289  func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1290  	c := getMCache(mp)
  1291  	if c == nil {
  1292  		throw("profilealloc called without a P or outside bootstrapping")
  1293  	}
  1294  	c.nextSample = nextSample()
  1295  	mProf_Malloc(x, size)
  1296  }
  1297  
  1298  // nextSample returns the next sampling point for heap profiling. The goal is
  1299  // to sample allocations on average every MemProfileRate bytes, but with a
  1300  // completely random distribution over the allocation timeline; this
  1301  // corresponds to a Poisson process with rate 1/MemProfileRate. In a Poisson
  1302  // process, the distance between two samples follows an exponential
  1303  // distribution with mean MemProfileRate, so the best return value is a random
  1304  // number taken from an exponential distribution whose mean is MemProfileRate.
  1305  func nextSample() uintptr {
  1306  	if MemProfileRate == 1 {
  1307  		// Callers assign our return value to
  1308  		// mcache.next_sample, but next_sample is not used
  1309  		// when the rate is 1. So avoid the math below and
  1310  		// just return something.
  1311  		return 0
  1312  	}
  1313  	if GOOS == "plan9" {
  1314  		// Plan 9 doesn't support floating point in the note handler.
  1315  		if g := getg(); g == g.m.gsignal {
  1316  			return nextSampleNoFP()
  1317  		}
  1318  	}
  1319  
  1320  	return uintptr(fastexprand(MemProfileRate))
  1321  }
  1322  
  1323  // fastexprand returns a random number from an exponential distribution with
  1324  // the specified mean.
  1325  func fastexprand(mean int) int32 {
  1326  	// Avoid overflow. The maximum possible step is
  1327  	// -ln(1/(1<<randomBitCount)) * mean, approximately 18 * mean.
  1328  	switch {
  1329  	case mean > 0x7000000:
  1330  		mean = 0x7000000
  1331  	case mean == 0:
  1332  		return 0
  1333  	}
  1334  
  1335  	// Take a random sample of the exponential distribution exp(-x/mean).
  1336  	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
  1337  	// p = 1 - exp(-x/mean), so
  1338  	// q = 1 - p == exp(-x/mean)
  1339  	// log_e(q) = -x/mean
  1340  	// x = -log_e(q) * mean
  1341  	// x = log_2(q) * (-log_e(2)) * mean    ; using log_2 for efficiency
  1342  	// With q uniform on (0, 1], x is exponential with the given mean.
  1343  	const randomBitCount = 26
  1344  	q := fastrandn(1<<randomBitCount) + 1
  1345  	qlog := fastlog2(float64(q)) - randomBitCount
  1346  	if qlog > 0 {
  1347  		qlog = 0
  1348  	}
  1349  	const minusLog2 = -0.6931471805599453 // -ln(2)
  1350  	return int32(qlog*(minusLog2*float64(mean))) + 1
  1351  }
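
// The derivation above is ordinary inverse-CDF sampling; fastexprand
// approximates it with fastlog2 and fastrandn for speed. Written plainly
// with math.Log, the same draw would look like this sketch (not part of
// this file):
//
//	import (
//		"math"
//		"math/rand"
//	)
//
//	// expSample draws from an exponential distribution with the given mean:
//	// if U is uniform on (0, 1], then -mean*ln(U) is exponential with that mean.
//	func expSample(mean float64) float64 {
//		u := 1 - rand.Float64() // uniform in (0, 1]; avoids ln(0)
//		return -mean * math.Log(u)
//	}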
  1352  
  1353  // nextSampleNoFP is similar to nextSample, but uses older, simpler code to
  1354  // avoid floating point; a uniform draw from [0, 2*rate) still has mean rate.
  1355  func nextSampleNoFP() uintptr {
  1356  	// Set first allocation sample size.
  1357  	rate := MemProfileRate
  1358  	if rate > 0x3fffffff { // make 2*rate not overflow
  1359  		rate = 0x3fffffff
  1360  	}
  1361  	if rate != 0 {
  1362  		return uintptr(fastrandn(uint32(2 * rate)))
  1363  	}
  1364  	return 0
  1365  }
  1366  
  1367  type persistentAlloc struct {
  1368  	base *notInHeap
  1369  	off  uintptr
  1370  }
  1371  
  1372  var globalAlloc struct {
  1373  	mutex
  1374  	persistentAlloc
  1375  }
  1376  
  1377  // persistentChunkSize is the number of bytes we allocate when we grow
  1378  // a persistentAlloc.
  1379  const persistentChunkSize = 256 << 10
  1380  
  1381  // persistentChunks is a list of all the persistent chunks we have
  1382  // allocated. The list is maintained through the first word in the
  1383  // persistent chunk. This is updated atomically.
  1384  var persistentChunks *notInHeap
  1385  
  1386  // persistentalloc wraps sysAlloc so that it can allocate small chunks.
  1387  // There is no associated free operation.
  1388  // Intended for things like function/type/debug-related persistent data.
  1389  // If align is 0, uses default align (currently 8).
  1390  // The returned memory will be zeroed.
  1391  //
  1392  // Consider marking persistentalloc'd types go:notinheap.
  1393  func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1394  	var p *notInHeap
  1395  	systemstack(func() {
  1396  		p = persistentalloc1(size, align, sysStat)
  1397  	})
  1398  	return unsafe.Pointer(p)
  1399  }
  1400  
  1401  // Must run on system stack because stack growth can (re)invoke it.
  1402  // See issue 9174.
  1403  //go:systemstack
  1404  func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
  1405  	const (
  1406  		maxBlock = 64 << 10 // VM reservation granularity is 64K on Windows
  1407  	)
  1408  
  1409  	if size == 0 {
  1410  		throw("persistentalloc: size == 0")
  1411  	}
  1412  	if align != 0 {
  1413  		if align&(align-1) != 0 {
  1414  			throw("persistentalloc: align is not a power of 2")
  1415  		}
  1416  		if align > _PageSize {
  1417  			throw("persistentalloc: align is too large")
  1418  		}
  1419  	} else {
  1420  		align = 8
  1421  	}
  1422  
  1423  	if size >= maxBlock {
  1424  		return (*notInHeap)(sysAlloc(size, sysStat))
  1425  	}
  1426  
  1427  	mp := acquirem()
  1428  	var persistent *persistentAlloc
  1429  	if mp != nil && mp.p != 0 {
  1430  		persistent = &mp.p.ptr().palloc
  1431  	} else {
  1432  		lock(&globalAlloc.mutex)
  1433  		persistent = &globalAlloc.persistentAlloc
  1434  	}
  1435  	persistent.off = alignUp(persistent.off, align)
  1436  	if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1437  		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
  1438  		if persistent.base == nil {
  1439  			if persistent == &globalAlloc.persistentAlloc {
  1440  				unlock(&globalAlloc.mutex)
  1441  			}
  1442  			throw("runtime: cannot allocate memory")
  1443  		}
  1444  
  1445  		// Add the new chunk to the persistentChunks list.
  1446  		for {
  1447  			chunks := uintptr(unsafe.Pointer(persistentChunks))
  1448  			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1449  			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1450  				break
  1451  			}
  1452  		}
  1453  		persistent.off = alignUp(goarch.PtrSize, align)
  1454  	}
  1455  	p := persistent.base.add(persistent.off)
  1456  	persistent.off += size
  1457  	releasem(mp)
  1458  	if persistent == &globalAlloc.persistentAlloc {
  1459  		unlock(&globalAlloc.mutex)
  1460  	}
  1461  
  1462  	if sysStat != &memstats.other_sys {
  1463  		sysStat.add(int64(size))
  1464  		memstats.other_sys.add(-int64(size))
  1465  	}
  1466  	return p
  1467  }
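
// At its core persistentalloc1 is a bump allocator over fixed-size chunks;
// a stand-alone sketch (not part of this file, omitting the per-P caching,
// chunk list, and stat accounting):
//
//	type bump struct {
//		buf []byte
//		off uintptr
//	}
//
//	// alloc returns size bytes aligned to align (a power of two), or nil
//	// when the current chunk is exhausted and a fresh one is needed.
//	func (b *bump) alloc(size, align uintptr) []byte {
//		off := (b.off + align - 1) &^ (align - 1) // alignUp(b.off, align)
//		if off+size > uintptr(len(b.buf)) {
//			return nil
//		}
//		b.off = off + size
//		return b.buf[off : off+size : off+size]
//	}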
  1468  
  1469  // inPersistentAlloc reports whether p points to memory allocated by
  1470  // persistentalloc. This must be nosplit because it is called by the
  1471  // cgo checker code, which is called by the write barrier code.
  1472  //go:nosplit
  1473  func inPersistentAlloc(p uintptr) bool {
  1474  	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  1475  	for chunk != 0 {
  1476  		if p >= chunk && p < chunk+persistentChunkSize {
  1477  			return true
  1478  		}
  1479  		chunk = *(*uintptr)(unsafe.Pointer(chunk))
  1480  	}
  1481  	return false
  1482  }
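
// persistentChunks forms a lock-free stack: persistentalloc1 pushes new
// chunks with a CAS loop, threading the list through each chunk's first
// word, and inPersistentAlloc traverses it. The same push pattern with
// sync/atomic, as a sketch (not part of this file):
//
//	import (
//		"sync/atomic"
//		"unsafe"
//	)
//
//	type node struct {
//		next *node
//	}
//
//	var head unsafe.Pointer // *node, the top of the stack
//
//	func push(n *node) {
//		for {
//			old := atomic.LoadPointer(&head)
//			n.next = (*node)(old)
//			if atomic.CompareAndSwapPointer(&head, old, unsafe.Pointer(n)) {
//				return // no concurrent push won the race; n is the new head
//			}
//		}
//	}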
  1483  
  1484  // linearAlloc is a simple linear allocator that pre-reserves a region
  1485  // of memory and then optionally maps that region into the Ready state
  1486  // as needed.
  1487  //
  1488  // The caller is responsible for locking.
  1489  type linearAlloc struct {
  1490  	next   uintptr // next free byte
  1491  	mapped uintptr // one byte past end of mapped space
  1492  	end    uintptr // end of reserved space
  1493  
  1494  	mapMemory bool // transition memory from Reserved to Ready if true
  1495  }
  1496  
  1497  func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
  1498  	if base+size < base {
  1499  		// Chop off the last byte. The runtime isn't prepared
  1500  		// to deal with situations where the bounds could overflow.
  1501  		// Leave that memory reserved, though, so we don't map it
  1502  		// later.
  1503  		size -= 1
  1504  	}
  1505  	l.next, l.mapped = base, base
  1506  	l.end = base + size
  1507  	l.mapMemory = mapMemory
  1508  }
  1509  
  1510  func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1511  	p := alignUp(l.next, align)
  1512  	if p+size > l.end {
  1513  		return nil
  1514  	}
  1515  	l.next = p + size
  1516  	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
  1517  		if l.mapMemory {
  1518  			// Transition from Reserved to Prepared to Ready.
  1519  			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
  1520  			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
  1521  		}
  1522  		l.mapped = pEnd
  1523  	}
  1524  	return unsafe.Pointer(p)
  1525  }
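
// A worked example of the lazy-mapping arithmetic above, with illustrative
// values (4 KiB physical pages, region base 0x1000, a 100-byte allocation):
//
//	l.next = 0x1000 + 100 = 0x1064
//	pEnd   = alignUp(0x1063, 0x1000) = 0x2000
//
// so exactly one page is transitioned to Ready, l.mapped advances to 0x2000,
// and further allocations within that page cause no sysMap/sysUsed calls.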
  1526  
  1527  // notInHeap is off-heap memory allocated by a lower-level allocator
  1528  // like sysAlloc or persistentalloc.
  1529  //
  1530  // In general, it's better to use real types marked as go:notinheap,
  1531  // but this serves as a generic type for situations where that isn't
  1532  // possible (like in the allocators).
  1533  //
  1534  // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  1535  //
  1536  //go:notinheap
  1537  type notInHeap struct{}
  1538  
  1539  func (p *notInHeap) add(bytes uintptr) *notInHeap {
  1540  	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  1541  }
  1542  
  1543  // computeRZlog computes the size of the redzone for a given allocation size,
  1544  // following the redzone sizing used in the compiler-rt sanitizer runtime.
  1545  func computeRZlog(userSize uintptr) uintptr {
  1546  	switch {
  1547  	case userSize <= (64 - 16):
  1548  		return 16 << 0
  1549  	case userSize <= (128 - 32):
  1550  		return 16 << 1
  1551  	case userSize <= (512 - 64):
  1552  		return 16 << 2
  1553  	case userSize <= (4096 - 128):
  1554  		return 16 << 3
  1555  	case userSize <= (1<<14)-256:
  1556  		return 16 << 4
  1557  	case userSize <= (1<<15)-512:
  1558  		return 16 << 5
  1559  	case userSize <= (1<<16)-1024:
  1560  		return 16 << 6
  1561  	default:
  1562  		return 16 << 7
  1563  	}
  1564  }
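
// For example (illustrative, not from this file): userSize = 100 skips the
// first two cases (48 and 96 are both below 100) and matches
// userSize <= 512-64, so the redzone is 16 << 2 = 64 bytes; anything larger
// than (1<<16)-1024 bytes falls through to the maximum 16 << 7 = 2048-byte
// redzone.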