github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/malloc.h

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator, based on tcmalloc.
     6  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     7  
     8  // The main allocator works in runs of pages.
     9  // Small allocation sizes (up to and including 32 kB) are
    10  // rounded to one of about 100 size classes, each of which
    11  // has its own free list of objects of exactly that size.
    12  // Any free page of memory can be split into a set of objects
    13  // of one size class, which are then managed using free list
    14  // allocators.
    15  //
    16  // The allocator's data structures are:
    17  //
    18  //	FixAlloc: a free-list allocator for fixed-size objects,
    19  //		used to manage storage used by the allocator.
    20  //	MHeap: the malloc heap, managed at page (8192-byte) granularity.
    21  //	MSpan: a run of pages managed by the MHeap.
    22  //	MCentral: a shared free list for a given size class.
    23  //	MCache: a per-thread (in Go, per-P) cache for small objects.
    24  //	MStats: allocation statistics.
    25  //
    26  // Allocating a small object proceeds up a hierarchy of caches:
    27  //
    28  //	1. Round the size up to one of the small size classes
    29  //	   and look in the corresponding MCache free list.
    30  //	   If the list is not empty, allocate an object from it.
    31  //	   This can all be done without acquiring a lock.
    32  //
    33  //	2. If the MCache free list is empty, replenish it by
    34  //	   taking a bunch of objects from the MCentral free list.
    35  //	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
    36  //
    37  //	3. If the MCentral free list is empty, replenish it by
    38  //	   allocating a run of pages from the MHeap and then
    39  //	   chopping that memory into objects of the given size.
    40  //	   Allocating many objects amortizes the cost of locking
    41  //	   the heap.
    42  //
    43  //	4. If the MHeap is empty or has no page runs large enough,
    44  //	   allocate a new group of pages (at least 1MB) from the
    45  //	   operating system.  Allocating a large run of pages
    46  //	   amortizes the cost of talking to the operating system.
    47  //
    48  // Freeing a small object proceeds up the same hierarchy:
    49  //
    50  //	1. Look up the size class for the object and add it to
    51  //	   the MCache free list.
    52  //
    53  //	2. If the MCache free list is too long or the MCache has
    54  //	   too much memory, return some to the MCentral free lists.
    55  //
    56  //	3. If all the objects in a given span have returned to
    57  //	   the MCentral list, return that span to the page heap.
    58  //
    59  //	4. If the heap has too much memory, return some to the
    60  //	   operating system.
    61  //
    62  //	TODO(rsc): Step 4 is not implemented.
    63  //
    64  // Allocating and freeing a large object uses the page heap
    65  // directly, bypassing the MCache and MCentral free lists.
    66  //
    67  // The small objects on the MCache and MCentral free lists
    68  // may or may not be zeroed.  They are zeroed if and only if
    69  // the second word of the object is zero.  A span in the
    70  // page heap is zeroed unless s->needzero is set. When a span
    71  // is allocated to break into small objects, it is zeroed if needed
    72  // and s->needzero is set. There are two main benefits to delaying the
    73  // zeroing this way:
    74  //
    75  //	1. stack frames allocated from the small object lists
    76  //	   or the page heap can avoid zeroing altogether.
    77  //	2. the cost of zeroing when reusing a small object is
    78  //	   charged to the mutator, not the garbage collector.
    79  //
    80  // This C code was written with an eye toward translating to Go
    81  // in the future.  Methods have the form Type_Method(Type *t, ...).
    82  
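        // Worked example (added note, not in the original comment): a 50-byte
        // allocation is rounded up to the 64-byte size class.  The allocating
        // thread (P) first pops an object from its own MCache free list for that
        // class, with no locking; if that list is empty it pulls a batch of
        // 64-byte objects from the MCentral for the class under one lock; if the
        // MCentral has none it carves a fresh span of pages out of the MHeap; and
        // if the MHeap has no large enough run it asks the operating system for
        // at least 1MB more.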
    83  typedef struct MCentral	MCentral;
    84  typedef struct MHeap	MHeap;
    85  typedef struct MSpan	MSpan;
    86  typedef struct MStats	MStats;
    87  typedef struct MLink	MLink;
    88  typedef struct MTypes	MTypes;
    89  typedef struct GCStats	GCStats;
    90  
    91  enum
    92  {
    93  	PageShift	= 13,
    94  	PageSize	= 1<<PageShift,
    95  	PageMask	= PageSize - 1,
    96  };
    97  typedef	uintptr	PageID;		// address >> PageShift
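        // Illustrative sketch (added, not in the original header): converting
        // between an address and its PageID using the constants above.  The
        // helper names are hypothetical.
        static PageID
        example_addr_to_pageid(void *v)
        {
        	return (uintptr)v >> PageShift;	// discard the low PageShift bits
        }
        
        static void*
        example_pageid_to_addr(PageID id)
        {
        	return (void*)(id << PageShift);	// first byte of that page
        }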
    98  
    99  enum
   100  {
   101  	// Computed constant.  The definition of MaxSmallSize and the
   102  	// algorithm in msize.c produce some number of different allocation
   103  	// size classes.  NumSizeClasses is that number.  It's needed here
   104  	// because there are static arrays of this length; when msize runs its
   105  	// size choosing algorithm it double-checks that NumSizeClasses agrees.
   106  	NumSizeClasses = 67,
   107  
   108  	// Tunable constants.
   109  	MaxSmallSize = 32<<10,
   110  
   111  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
   112  	TinySize = 16,
   113  	TinySizeClass = 2,
   114  
   115  	FixAllocChunk = 16<<10,		// Chunk size for FixAlloc
   116  	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
   117  	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
   118  
   119  	// Number of bits in page-to-span calculations (8k pages).
   120  	// On Windows 64-bit we limit the arena to 32GB or 35 bits (see below for reason).
   121  	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
   122  	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
   123  #if __SIZEOF_POINTER__ == 8
   124  #ifdef GOOS_windows
   125  	// Windows counts memory used by the page table toward the committed memory
   126  	// of the process, so we can't reserve too much memory.
   127  	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
   128  	MHeapMap_Bits = 35 - PageShift,
   129  #else
   130  	MHeapMap_Bits = 37 - PageShift,
   131  #endif
   132  #else
   133  	MHeapMap_Bits = 32 - PageShift,
   134  #endif
   135  
   136  	// Max number of threads to run garbage collection.
   137  	// 2, 3, and 4 are all plausible maximums depending
   138  	// on the hardware details of the machine.  The garbage
   139  	// collector scales well to 8 cpus.
   140  	MaxGcproc = 8,
   141  };
   142  
   143  // Maximum memory allocation size, a hint for callers.
   144  // This must be a #define instead of an enum because it
   145  // is so large.
   146  #if __SIZEOF_POINTER__ == 8
   147  #define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB or 32 GB */
   148  #else
   149  #define	MaxMem	((uintptr)-1)
   150  #endif
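        // Worked arithmetic (added note): with PageShift = 13, MHeapMap_Bits is
        // 37 - 13 = 24 on most 64-bit systems, so MaxMem = 1ULL << (24 + 13)
        // = 1ULL << 37 = 128 GB; on 64-bit Windows it is 1ULL << 35 = 32 GB.
        // On 32-bit systems MaxMem is simply the whole address space.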
   151  
   152  // A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
   153  struct MLink
   154  {
   155  	MLink *next;
   156  };
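        // Illustrative sketch (added, not in the original source): every free
        // list in this allocator threads an MLink through the first word of each
        // free block, so push and pop are single pointer swaps.  The helper
        // names are hypothetical.
        static void
        example_mlink_push(MLink **list, void *block)
        {
        	MLink *m;
        
        	m = (MLink*)block;	// reuse the block's first word as the link
        	m->next = *list;
        	*list = m;
        }
        
        static void*
        example_mlink_pop(MLink **list)
        {
        	MLink *m;
        
        	m = *list;
        	if(m != nil)
        		*list = m->next;
        	return m;
        }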
   157  
   158  // SysAlloc obtains a large chunk of zeroed memory from the
   159  // operating system, typically on the order of a hundred kilobytes
   160  // or a megabyte.
   161  // NOTE: SysAlloc returns OS-aligned memory, but the heap allocator
   162  // may use larger alignment, so the caller must be careful to realign the
   163  // memory obtained by SysAlloc.
   164  //
   165  // SysUnused notifies the operating system that the contents
   166  // of the memory region are no longer needed and can be reused
   167  // for other purposes.
   168  // SysUsed notifies the operating system that the contents
   169  // of the memory region are needed again.
   170  //
   171  // SysFree returns memory to the operating system unconditionally; it is only used if
   172  // an out-of-memory error has been detected midway through
   173  // an allocation.  It is okay if SysFree is a no-op.
   174  //
   175  // SysReserve reserves address space without allocating memory.
   176  // If the pointer passed to it is non-nil, the caller wants the
   177  // reservation there, but SysReserve can still choose another
   178  // location if that one is unavailable.  On some systems and in some
   179  // cases SysReserve will simply check that the address space is
   180  // available and not actually reserve it.  If SysReserve returns
   181  // non-nil, it sets *reserved to true if the address space is
   182  // reserved, false if it has merely been checked.
   183  // NOTE: SysReserve returns OS-aligned memory, but the heap allocator
   184  // may use larger alignment, so the caller must be careful to realign the
   185  // memory obtained by SysReserve.
   186  //
   187  // SysMap maps previously reserved address space for use.
   188  // The reserved argument is true if the address space was really
   189  // reserved, not merely checked.
   190  //
   191  // SysFault marks an (already SysAlloc'd) region to fault
   192  // if accessed.  Used only for debugging the runtime.
   193  
   194  void*	runtime_SysAlloc(uintptr nbytes, uint64 *stat);
   195  void	runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
   196  void	runtime_SysUnused(void *v, uintptr nbytes);
   197  void	runtime_SysUsed(void *v, uintptr nbytes);
   198  void	runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
   199  void*	runtime_SysReserve(void *v, uintptr nbytes, bool *reserved);
   200  void	runtime_SysFault(void *v, uintptr nbytes);
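        // Illustrative sketch (added, not in the original source): the
        // reserve-then-map protocol described above, as a caller might use it.
        // Statistics accounting details are elided; the helper name is
        // hypothetical.
        static void*
        example_sys_grow(uintptr nbytes, uint64 *stat)
        {
        	bool reserved;
        	void *v;
        
        	// Ask only for address space; nothing is committed yet.
        	v = runtime_SysReserve(nil, nbytes, &reserved);
        	if(v == nil)
        		return nil;
        	// Commit before first use, telling SysMap whether the address
        	// space was truly reserved or merely checked as available.
        	runtime_SysMap(v, nbytes, reserved, stat);
        	return v;
        }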
   201  
   202  // FixAlloc is a simple free-list allocator for fixed size objects.
   203  // Malloc uses a FixAlloc wrapped around SysAlloc to manage its
   204  // MCache and MSpan objects.
   205  //
   206  // Memory returned by FixAlloc_Alloc is not zeroed.
   207  // The caller is responsible for locking around FixAlloc calls.
   208  // Callers can keep state in the object but the first word is
   209  // smashed by freeing and reallocating.
   210  struct FixAlloc
   211  {
   212  	uintptr	size;
   213  	void	(*first)(void *arg, byte *p);	// called first time p is returned
   214  	void*	arg;
   215  	MLink*	list;
   216  	byte*	chunk;
   217  	uint32	nchunk;
   218  	uintptr	inuse;	// in-use bytes now
   219  	uint64*	stat;
   220  };
   221  
   222  void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
   223  void*	runtime_FixAlloc_Alloc(FixAlloc *f);
   224  void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
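        // Illustrative sketch (added, not in the original source): typical
        // FixAlloc usage per the comment above.  The caller supplies its own
        // locking, and the first word of a freed object is clobbered while it
        // sits on the free list.  ExampleNode and the helpers are hypothetical.
        typedef struct ExampleNode ExampleNode;
        struct ExampleNode
        {
        	ExampleNode *link;	// clobbered by FixAlloc while the node is free
        	uintptr payload;
        };
        
        static FixAlloc example_nodealloc;
        static uint64 example_node_sys;
        
        static void
        example_node_init(void)
        {
        	runtime_FixAlloc_Init(&example_nodealloc, sizeof(ExampleNode), nil, nil, &example_node_sys);
        }
        
        static ExampleNode*
        example_node_new(void)
        {
        	// Caller must hold whatever lock guards example_nodealloc.
        	return (ExampleNode*)runtime_FixAlloc_Alloc(&example_nodealloc);
        }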
   225  
   226  
   227  // Statistics.
   228  // Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
   229  struct MStats
   230  {
   231  	// General statistics.
   232  	uint64	alloc;		// bytes allocated and still in use
   233  	uint64	total_alloc;	// bytes allocated (even if freed)
   234  	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
   235  	uint64	nlookup;	// number of pointer lookups
   236  	uint64	nmalloc;	// number of mallocs
   237  	uint64	nfree;  // number of frees
   238  
   239  	// Statistics about malloc heap.
   240  	// protected by mheap.Lock
   241  	uint64	heap_alloc;	// bytes allocated and still in use
   242  	uint64	heap_sys;	// bytes obtained from system
   243  	uint64	heap_idle;	// bytes in idle spans
   244  	uint64	heap_inuse;	// bytes in non-idle spans
   245  	uint64	heap_released;	// bytes released to the OS
   246  	uint64	heap_objects;	// total number of allocated objects
   247  
   248  	// Statistics about allocation of low-level fixed-size structures.
   249  	// Protected by FixAlloc locks.
   250  	uint64	stacks_inuse;	// bootstrap stacks
   251  	uint64	stacks_sys;
   252  	uint64	mspan_inuse;	// MSpan structures
   253  	uint64	mspan_sys;
   254  	uint64	mcache_inuse;	// MCache structures
   255  	uint64	mcache_sys;
   256  	uint64	buckhash_sys;	// profiling bucket hash table
   257  	uint64	gc_sys;
   258  	uint64	other_sys;
   259  
   260  	// Statistics about garbage collector.
   261  	// Protected by mheap or stopping the world during GC.
   262  	uint64	next_gc;	// next GC (in heap_alloc time)
   263  	uint64  last_gc;	// last GC (in absolute time)
   264  	uint64	pause_total_ns;
   265  	uint64	pause_ns[256];
   266  	uint64	pause_end[256];
   267  	uint32	numgc;
   268  	float64	gc_cpu_fraction;
   269  	bool	enablegc;
   270  	bool	debuggc;
   271  
   272  	// Statistics about allocation size classes.
   273  	struct {
   274  		uint32 size;
   275  		uint64 nmalloc;
   276  		uint64 nfree;
   277  	} by_size[NumSizeClasses];
   278  };
   279  
   280  extern MStats mstats
   281    __asm__ (GOSYM_PREFIX "runtime.memStats");
   282  void	runtime_updatememstats(GCStats *stats);
   283  
   284  // Size classes.  Computed and initialized by InitSizes.
   285  //
   286  // SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
   287  //	1 <= sizeclass < NumSizeClasses, for n.
   288  //	Size class 0 is reserved to mean "not small".
   289  //
   290  // class_to_size[i] = largest size in class i
   291  // class_to_allocnpages[i] = number of pages to allocate when
   292  //	making new objects in class i
   293  
   294  int32	runtime_SizeToClass(int32);
   295  uintptr	runtime_roundupsize(uintptr);
   296  extern	int32	runtime_class_to_size[NumSizeClasses];
   297  extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
   298  extern	int8	runtime_size_to_class8[1024/8 + 1];
   299  extern	int8	runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
   300  extern	void	runtime_InitSizes(void);
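        // Illustrative sketch (added, not in the original header): one plausible
        // way the two lookup tables above are consulted, with one entry per
        // 8 bytes of request size up to 1024 and one per 128 bytes beyond that
        // (compare runtime_SizeToClass in msize.c).  Hypothetical helper.
        static int32
        example_size_to_class(int32 size)
        {
        	if(size <= 1024)
        		return runtime_size_to_class8[(size+7) >> 3];	// round up to an 8-byte step
        	return runtime_size_to_class128[(size-1024+127) >> 7];	// round up to a 128-byte step
        }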
   301  
   302  
   303  typedef struct MCacheList MCacheList;
   304  struct MCacheList
   305  {
   306  	MLink *list;
   307  	uint32 nlist;
   308  };
   309  
   310  // Per-thread (in Go, per-P) cache for small objects.
   311  // No locking needed because it is per-thread (per-P).
   312  struct MCache
   313  {
   314  	// The following members are accessed on every malloc,
   315  	// so they are grouped here for better caching.
   316  	int32 next_sample;		// trigger heap sample after allocating this many bytes
   317  	intptr local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
   318  	// Allocator cache for tiny objects w/o pointers.
   319  	// See "Tiny allocator" comment in malloc.goc.
   320  	byte*	tiny;
   321  	uintptr	tinysize;
   322  	// The rest is not accessed on every malloc.
   323  	MSpan*	alloc[NumSizeClasses];	// spans to allocate from
   324  	MCacheList free[NumSizeClasses];// lists of explicitly freed objects
   325  	// Local allocator stats, flushed during GC.
   326  	uintptr local_nlookup;		// number of pointer lookups
   327  	uintptr local_largefree;	// bytes freed for large objects (>MaxSmallSize)
   328  	uintptr local_nlargefree;	// number of frees for large objects (>MaxSmallSize)
   329  	uintptr local_nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
   330  };
   331  
   332  MSpan*	runtime_MCache_Refill(MCache *c, int32 sizeclass);
   333  void	runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
   334  void	runtime_MCache_ReleaseAll(MCache *c);
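        // Illustrative sketch (added, not in the original source): step 1 of the
        // freeing hierarchy above -- a freed small object is pushed onto the
        // per-P MCacheList for its size class without taking any lock.  When the
        // list grows too long, runtime_MCache_Free hands a batch back to the
        // MCentral for that class.  Hypothetical helper.
        static void
        example_cache_free(MCache *c, void *v, int32 sizeclass)
        {
        	MCacheList *l;
        	MLink *m;
        
        	l = &c->free[sizeclass];
        	m = (MLink*)v;
        	m->next = l->list;	// thread the object onto the local list
        	l->list = m;
        	l->nlist++;
        }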
   335  
   336  // MTypes describes the types of blocks allocated within a span.
   337  // The compression field describes the layout of the data.
   338  //
   339  // MTypes_Empty:
   340  //     All blocks are free, or no type information is available for
   341  //     allocated blocks.
   342  //     The data field has no meaning.
   343  // MTypes_Single:
   344  //     The span contains just one block.
   345  //     The data field holds the type information.
   346  //     The sysalloc field has no meaning.
   347  // MTypes_Words:
   348  //     The span contains multiple blocks.
   349  //     The data field points to an array of type [NumBlocks]uintptr,
   350  //     and each element of the array holds the type of the corresponding
   351  //     block.
   352  // MTypes_Bytes:
   353  //     The span contains at most seven different types of blocks.
   354  //     The data field points to the following structure:
   355  //         struct {
   356  //             type  [8]uintptr       // type[0] is always 0
   357  //             index [NumBlocks]byte
   358  //         }
   359  //     The type of the i-th block is: data.type[data.index[i]]
   360  enum
   361  {
   362  	MTypes_Empty = 0,
   363  	MTypes_Single = 1,
   364  	MTypes_Words = 2,
   365  	MTypes_Bytes = 3,
   366  };
   367  struct MTypes
   368  {
   369  	byte	compression;	// one of MTypes_*
   370  	uintptr	data;
   371  };
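        // Illustrative sketch (added, not in the original source): decoding the
        // type of block i from an MTypes_Bytes record, following the layout
        // documented above (eight uintptr type words followed by one index byte
        // per block).  Hypothetical helper.
        static uintptr
        example_type_of_block(MTypes *t, uintptr i)
        {
        	uintptr *types;
        	byte *index;
        
        	if(t->compression != MTypes_Bytes)
        		return 0;
        	types = (uintptr*)t->data;
        	index = (byte*)t->data + 8*sizeof(uintptr);
        	return types[index[i]];	// data.type[data.index[i]]
        }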
   372  
   373  enum
   374  {
   375  	KindSpecialFinalizer = 1,
   376  	KindSpecialProfile = 2,
   377  	// Note: The finalizer special must be first because if we're freeing
   378  	// an object, a finalizer special will cause the freeing operation
   379  	// to abort, and we want to keep the other special records around
   380  	// if that happens.
   381  };
   382  
   383  typedef struct Special Special;
   384  struct Special
   385  {
   386  	Special*	next;	// linked list in span
   387  	uint16		offset;	// span offset of object
   388  	byte		kind;	// kind of Special
   389  };
   390  
   391  // The described object has a finalizer set for it.
   392  typedef struct SpecialFinalizer SpecialFinalizer;
   393  struct SpecialFinalizer
   394  {
   395  	Special		special;
   396  	FuncVal*	fn;
   397  	const FuncType*	ft;
   398  	const PtrType*	ot;
   399  };
   400  
   401  // The described object is being heap profiled.
   402  typedef struct Bucket Bucket; // from mprof.goc
   403  typedef struct SpecialProfile SpecialProfile;
   404  struct SpecialProfile
   405  {
   406  	Special	special;
   407  	Bucket*	b;
   408  };
   409  
   410  // An MSpan is a run of pages.
   411  enum
   412  {
   413  	MSpanInUse = 0,
   414  	MSpanFree,
   415  	MSpanListHead,
   416  	MSpanDead,
   417  };
   418  struct MSpan
   419  {
   420  	MSpan	*next;		// in a span linked list
   421  	MSpan	*prev;		// in a span linked list
   422  	PageID	start;		// starting page number
   423  	uintptr	npages;		// number of pages in span
   424  	MLink	*freelist;	// list of free objects
   425  	// sweep generation:
   426  	// if sweepgen == h->sweepgen - 2, the span needs sweeping
   427  	// if sweepgen == h->sweepgen - 1, the span is currently being swept
   428  	// if sweepgen == h->sweepgen, the span is swept and ready to use
   429  	// h->sweepgen is incremented by 2 after every GC
   430  	uint32	sweepgen;
   431  	uint16	ref;		// capacity - number of objects in freelist
   432  	uint8	sizeclass;	// size class
   433  	bool	incache;	// being used by an MCache
   434  	uint8	state;		// MSpanInUse etc
   435  	uint8	needzero;	// needs to be zeroed before allocation
   436  	uintptr	elemsize;	// computed from sizeclass or from npages
   437  	int64   unusedsince;	// First time spotted by GC in MSpanFree state
   438  	uintptr npreleased;	// number of pages released to the OS
   439  	byte	*limit;		// end of data in span
   440  	MTypes	types;		// types of allocated objects in this span
   441  	Lock	specialLock;	// guards specials list
   442  	Special	*specials;	// linked list of special records sorted by offset.
   443  	MLink	*freebuf;	// objects freed explicitly, not incorporated into freelist yet
   444  };
   445  
   446  void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
   447  void	runtime_MSpan_EnsureSwept(MSpan *span);
   448  bool	runtime_MSpan_Sweep(MSpan *span);
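        // Illustrative sketch (added, not in the original source): reading the
        // sweepgen field per the comment in MSpan.  The heap's sweepgen (passed
        // as a parameter here, since MHeap is declared below) advances by 2 each
        // GC, so a span two generations behind still needs sweeping.
        // Hypothetical helper.
        static bool
        example_span_needs_sweep(uint32 heap_sweepgen, MSpan *s)
        {
        	return s->sweepgen == heap_sweepgen - 2;	// not yet swept this cycle
        }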
   449  
   450  // Every MSpan is in one doubly-linked list,
   451  // either one of the MHeap's free lists or one of the
   452  // MCentral's span lists.  We use empty MSpan structures as list heads.
   453  void	runtime_MSpanList_Init(MSpan *list);
   454  bool	runtime_MSpanList_IsEmpty(MSpan *list);
   455  void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
   456  void	runtime_MSpanList_InsertBack(MSpan *list, MSpan *span);
   457  void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
   458  
   459  
   460  // Central list of free objects of a given size.
   461  struct MCentral
   462  {
   463  	Lock  lock;
   464  	int32 sizeclass;
   465  	MSpan nonempty;	// list of spans with a free object
   466  	MSpan empty;	// list of spans with no free objects (or cached in an MCache)
   467  	int32 nfree;	// # of objects available in nonempty spans
   468  };
   469  
   470  void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
   471  MSpan*	runtime_MCentral_CacheSpan(MCentral *c);
   472  void	runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s);
   473  bool	runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
   474  void	runtime_MCentral_FreeList(MCentral *c, MLink *start); // TODO: need this?
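        // Illustrative sketch (added, not in the original source): step 2 of the
        // allocation hierarchy -- when an MCache has nothing left for a size
        // class, a span with free objects is fetched from the matching MCentral
        // and cached locally (compare runtime_MCache_Refill).  Hypothetical
        // helper.
        static MSpan*
        example_refill(MCache *c, MCentral *central, int32 sizeclass)
        {
        	MSpan *s;
        
        	s = runtime_MCentral_CacheSpan(central);	// takes central->lock
        	if(s != nil)
        		c->alloc[sizeclass] = s;	// later mallocs pop s->freelist
        	return s;
        }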
   475  
   476  // Main malloc heap.
   477  // The heap itself is the "free[]" and "freelarge" span lists,
   478  // but all the other global data is here too.
   479  struct MHeap
   480  {
   481  	Lock lock;
   482  	MSpan free[MaxMHeapList];	// free lists of given length
   483  	MSpan freelarge;		// free lists length >= MaxMHeapList
   484  	MSpan busy[MaxMHeapList];	// busy lists of large objects of given length
   485  	MSpan busylarge;		// busy lists of large objects length >= MaxMHeapList
   486  	MSpan **allspans;		// all spans out there
   487  	MSpan **sweepspans;		// copy of allspans referenced by sweeper
   488  	uint32	nspan;
   489  	uint32	nspancap;
   490  	uint32	sweepgen;		// sweep generation, see comment in MSpan
   491  	uint32	sweepdone;		// all spans are swept
   492  
   493  	// span lookup
   494  	MSpan**	spans;
   495  	uintptr	spans_mapped;
   496  
   497  	// range of addresses we might see in the heap
   498  	byte *bitmap;
   499  	uintptr bitmap_mapped;
   500  	byte *arena_start;
   501  	byte *arena_used;
   502  	byte *arena_end;
   503  	bool arena_reserved;
   504  
   505  	// central free lists for small size classes.
   506  	// the padding makes sure that the MCentrals are
   507  	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
   508  	// gets its own cache line.
   509  	struct {
   510  		MCentral mcentral;
   511  		byte pad[64];
   512  	} central[NumSizeClasses];
   513  
   514  	FixAlloc spanalloc;	// allocator for Span*
   515  	FixAlloc cachealloc;	// allocator for MCache*
   516  	FixAlloc specialfinalizeralloc;	// allocator for SpecialFinalizer*
   517  	FixAlloc specialprofilealloc;	// allocator for SpecialProfile*
   518  	Lock speciallock; // lock for special record allocators.
   519  
   520  	// Malloc stats.
   521  	uint64 largefree;	// bytes freed for large objects (>MaxSmallSize)
   522  	uint64 nlargefree;	// number of frees for large objects (>MaxSmallSize)
   523  	uint64 nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
   524  };
   525  extern MHeap runtime_mheap;
   526  
   527  void	runtime_MHeap_Init(MHeap *h);
   528  MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
   529  void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
   530  MSpan*	runtime_MHeap_Lookup(MHeap *h, void *v);
   531  MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, void *v);
   532  void	runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
   533  void*	runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
   534  void	runtime_MHeap_MapBits(MHeap *h);
   535  void	runtime_MHeap_MapSpans(MHeap *h);
   536  void	runtime_MHeap_Scavenger(void*);
   537  void	runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
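        // Illustrative sketch (added, not in the original source): the page-to-
        // span lookup behind runtime_MHeap_Lookup -- h->spans holds one MSpan*
        // per heap page, indexed by page number relative to arena_start.  Bounds
        // checks omitted; hypothetical helper.
        static MSpan*
        example_lookup_span(MHeap *h, void *v)
        {
        	uintptr p;
        
        	p = (uintptr)v - (uintptr)h->arena_start;	// byte offset into the arena
        	return h->spans[p >> PageShift];		// one entry per page
        }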
   538  
   539  void*	runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
   540  void*	runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
   541  int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
   542  void	runtime_gc(int32 force);
   543  uintptr	runtime_sweepone(void);
   544  void	runtime_markscan(void *v);
   545  void	runtime_marknogc(void *v);
   546  void	runtime_checkallocated(void *v, uintptr n);
   547  void	runtime_markfreed(void *v);
   548  void	runtime_checkfreed(void *v, uintptr n);
   549  extern	int32	runtime_checking;
   550  void	runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
   551  void	runtime_unmarkspan(void *v, uintptr size);
   552  void	runtime_purgecachedstats(MCache*);
   553  void*	runtime_cnew(const Type*);
   554  void*	runtime_cnewarray(const Type*, intgo);
   555  void	runtime_tracealloc(void*, uintptr, uintptr);
   556  void	runtime_tracefree(void*, uintptr);
   557  void	runtime_tracegc(void);
   558  
   559  uintptr	runtime_gettype(void*);
   560  
   561  enum
   562  {
   563  	// flags to malloc
   564  	FlagNoScan	= 1<<0,	// GC doesn't have to scan object
   565  	FlagNoProfiling	= 1<<1,	// must not profile
   566  	FlagNoGC	= 1<<2,	// must not free or scan for pointers
   567  	FlagNoZero	= 1<<3, // don't zero memory
   568  	FlagNoInvokeGC	= 1<<4, // don't invoke GC
   569  };
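        // Illustrative sketch (added, not in the original source): combining the
        // flags above on a call to runtime_mallocgc.  A pointer-free buffer that
        // the caller will fully overwrite can skip both GC scanning and zeroing.
        // Hypothetical helper; passing 0 for the type argument is an assumption
        // meaning "no type information".
        static void*
        example_alloc_raw(uintptr size)
        {
        	return runtime_mallocgc(size, 0, FlagNoScan|FlagNoZero);
        }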
   570  
   571  typedef struct Obj Obj;
   572  struct Obj
   573  {
   574  	byte	*p;	// data pointer
   575  	uintptr	n;	// size of data in bytes
   576  	uintptr	ti;	// type info
   577  };
   578  
   579  void	runtime_MProf_Malloc(void*, uintptr);
   580  void	runtime_MProf_Free(Bucket*, uintptr, bool);
   581  void	runtime_MProf_GC(void);
   582  void	runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
   583  int32	runtime_gcprocs(void);
   584  void	runtime_helpgc(int32 nproc);
   585  void	runtime_gchelper(void);
   586  void	runtime_createfing(void);
   587  G*	runtime_wakefing(void);
   588  extern bool	runtime_fingwait;
   589  extern bool	runtime_fingwake;
   590  
   591  void	runtime_setprofilebucket(void *p, Bucket *b);
   592  
   593  struct __go_func_type;
   594  struct __go_ptr_type;
   595  bool	runtime_addfinalizer(void *p, FuncVal *fn, const struct __go_func_type*, const struct __go_ptr_type*);
   596  void	runtime_removefinalizer(void*);
   597  void	runtime_queuefinalizer(void *p, FuncVal *fn, const struct __go_func_type *ft, const struct __go_ptr_type *ot);
   598  
   599  void	runtime_freeallspecials(MSpan *span, void *p, uintptr size);
   600  bool	runtime_freespecial(Special *s, void *p, uintptr size, bool freed);
   601  
   602  enum
   603  {
   604  	TypeInfo_SingleObject = 0,
   605  	TypeInfo_Array = 1,
   606  	TypeInfo_Chan = 2,
   607  
   608  	// Enables type information at the end of blocks allocated from the heap.
   609  	DebugTypeAtBlockEnd = 0,
   610  };
   611  
   612  // Information from the compiler about the layout of stack frames.
   613  typedef struct BitVector BitVector;
   614  struct BitVector
   615  {
   616  	int32 n; // # of bits
   617  	uint32 *data;
   618  };
   619  typedef struct StackMap StackMap;
   620  struct StackMap
   621  {
   622  	int32 n; // number of bitmaps
   623  	int32 nbit; // number of bits in each bitmap
   624  	uint32 data[];
   625  };
   626  enum {
   627  	// Pointer map
   628  	BitsPerPointer = 2,
   629  	BitsDead = 0,
   630  	BitsScalar = 1,
   631  	BitsPointer = 2,
   632  	BitsMultiWord = 3,
   633  	// BitsMultiWord will be set for the first word of a multi-word item.
   634  	// When it is set, one of the following will be set for the second word.
   635  	BitsString = 0,
   636  	BitsSlice = 1,
   637  	BitsIface = 2,
   638  	BitsEface = 3,
   639  };
   640  // Returns pointer map data for the given stackmap index
   641  // (the index is encoded in PCDATA_StackMapIndex).
   642  BitVector	runtime_stackmapdata(StackMap *stackmap, int32 n);
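        // Illustrative sketch (added, not in the original source): one way to
        // read the 2-bit entry for word i out of a BitVector returned by
        // runtime_stackmapdata, assuming entries are packed BitsPerPointer bits
        // at a time into the uint32 data words.  Hypothetical helper.
        static uint32
        example_pointer_bits(BitVector *bv, int32 i)
        {
        	return (bv->data[(i*BitsPerPointer)/32] >> ((i*BitsPerPointer)%32)) & 3;
        }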
   643  
   644  // defined in mgc0.go
   645  void	runtime_gc_m_ptr(Eface*);
   646  void	runtime_gc_g_ptr(Eface*);
   647  void	runtime_gc_itab_ptr(Eface*);
   648  
   649  void	runtime_memorydump(void);
   650  int32	runtime_setgcpercent(int32);
   651  
   652  // Value we use to mark dead pointers when GODEBUG=gcdead=1.
   653  #define PoisonGC ((uintptr)0xf969696969696969ULL)
   654  #define PoisonStack ((uintptr)0x6868686868686868ULL)
   655  
   656  struct Workbuf;
   657  void	runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
   658  void	runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
   659  void	runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
   660  void	runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));