github.com/razvanm/vanadium-go-1.3@v0.0.0-20160721203343-4a65068e5915/src/runtime/malloc.h

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches
// (an illustrative fast-path sketch follows this comment block):
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system.  Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed.  They are zeroed if and only if
// the second word of the object is zero.  A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
//
// This C code was written with an eye toward translating to Go
// in the future.  Methods have the form Type_Method(Type *t, ...).

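// Illustrative sketch only (not part of this header): step 1 of the
// allocation hierarchy above, the lock-free MCache fast path, written
// against the MCache, MSpan, and MLink declarations below.  The enclosing
// function and error handling are elided; the real fast path lives in
// malloc.goc.
//
//	static void*
//	smallalloc(MCache *c, int32 sizeclass)
//	{
//		MSpan *s;
//		MLink *v;
//
//		s = c->alloc[sizeclass];	// span cached for this size class
//		v = s->freelist;
//		if(v == nil) {
//			// Step 2: refill from the MCentral (takes a lock).
//			s = runtime·MCache_Refill(c, sizeclass);
//			v = s->freelist;
//		}
//		s->freelist = v->next;	// pop one object off the free list
//		s->ref++;		// one more object in use
//		return v;
//	}
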
typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;
typedef struct GCStats	GCStats;

enum
{
	PageShift	= 13,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	pageID;		// address >> PageShift

enum
{
	// Computed constant.  The definition of MaxSmallSize and the
	// algorithm in msize.c produce some number of different allocation
	// size classes.  NumSizeClasses is that number.  It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	NumSizeClasses = 67,

	// Tunable constants.
	MaxSmallSize = 32<<10,

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
	TinySize = 16,
	TinySizeClass = 2,

	FixAllocChunk = 16<<10,		// Chunk size for FixAlloc
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,		// Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	StackCacheSize = 32*1024,
	// Number of orders that get caching.  Order 0 is FixedStack
	// and each successive order is twice as large.
	NumStackOrders = 3,

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB, or 35 bits (see below for reason).
	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
#ifdef _64BIT
#ifdef GOOS_windows
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
	MHeapMap_Bits = 35 - PageShift,
#else
	MHeapMap_Bits = 37 - PageShift,
#endif
#else
	MHeapMap_Bits = 32 - PageShift,
#endif

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine.  The garbage
	// collector scales well to 32 cpus.
	MaxGcproc = 32,
};

// Maximum memory allocation size, a hint for callers.
// This must be a #define instead of an enum because it
// is so large.
#ifdef _64BIT
#define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB or 32 GB */
#else
#define	MaxMem	((uintptr)-1)
#endif
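
// Worked arithmetic (follows from the constants above):
// MHeapMap_Bits + PageShift recovers the arena bit width, so on 64-bit
// non-Windows MaxMem = 1ULL<<(24+13) = 1<<37 = 128 GB, and on 64-bit
// Windows MaxMem = 1ULL<<(22+13) = 1<<35 = 32 GB.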

// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};

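// Illustrative sketch (not part of this header): MLink is overlaid on the
// first word of a free block, so pushing and popping a free list is just
// pointer surgery.  "list" here is any MLink* head, e.g. MSpan.freelist:
//
//	// push block b onto the list
//	((MLink*)b)->next = list;
//	list = (MLink*)b;
//
//	// pop a block off the list
//	v = list;
//	list = v->next;
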
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory region to the operating system
// unconditionally; this is only used if an out-of-memory error
// has been detected midway through an allocation.
// It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable.  On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it.  If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed.  Used only for debugging the runtime.

void*	runtime·sysAlloc(uintptr nbytes, uint64 *stat);
void	runtime·SysFree(void *v, uintptr nbytes, uint64 *stat);
void	runtime·SysUnused(void *v, uintptr nbytes);
void	runtime·SysUsed(void *v, uintptr nbytes);
void	runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
void*	runtime·SysReserve(void *v, uintptr nbytes, bool *reserved);
void	runtime·SysFault(void *v, uintptr nbytes);

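// Illustrative usage sketch (not part of this header): the reserve/map
// dance used when growing the heap arena (cf. runtime·MHeap_SysAlloc).
// Names other than the Sys* functions and mstats are illustrative:
//
//	bool reserved;
//	void *v;
//
//	v = runtime·SysReserve(hint, nbytes, &reserved);	// claim address space
//	if(v != nil) {
//		// ... record the region, then back it with memory when needed:
//		runtime·SysMap(v, nbytes, reserved, &mstats.heap_sys);
//	}
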
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr	size;
	void	(*first)(void *arg, byte *p);	// called first time p is returned
	void*	arg;
	MLink*	list;
	byte*	chunk;
	uint32	nchunk;
	uintptr	inuse;	// in-use bytes now
	uint64*	stat;
};

void	runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
void*	runtime·FixAlloc_Alloc(FixAlloc *f);
void	runtime·FixAlloc_Free(FixAlloc *f, void *p);

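// Illustrative usage sketch (not part of this header): how the heap sets
// up its MSpan allocator (cf. runtime·MHeap_Init in mheap.c; RecordSpan is
// the "first" callback defined there).  Locking is elided:
//
//	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
//	...
//	MSpan *s = runtime·FixAlloc_Alloc(&h->spanalloc);	// not zeroed!
//	...
//	runtime·FixAlloc_Free(&h->spanalloc, s);	// smashes s's first word
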
// Statistics.
// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
	// General statistics.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs
	uint64	nfree;		// number of frees

	// Statistics about malloc heap.
	// Protected by mheap.lock.
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_released;	// bytes released to the OS
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// this number is included in heap_inuse above
	uint64	stacks_sys;	// always 0 in mstats
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	buckhash_sys;	// profiling bucket hash table
	uint64	gc_sys;
	uint64	other_sys;

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	last_gc;	// last GC (in absolute time)
	uint64	pause_total_ns;
	uint64	pause_ns[256];
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	struct MStatsBySize {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];

	uint64	tinyallocs;	// number of tiny allocations that didn't cause actual allocation; not exported to Go directly
};

#define mstats runtime·memstats
extern MStats mstats;
void	runtime·updatememstats(GCStats *stats);
void	runtime·ReadMemStats(MStats *stats);

// Size classes.  Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//	1 <= sizeclass < NumSizeClasses, for n.
//	Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//	making new objects in class i

int32	runtime·SizeToClass(int32);
uintptr	runtime·roundupsize(uintptr);
extern	int32	runtime·class_to_size[NumSizeClasses];
extern	int32	runtime·class_to_allocnpages[NumSizeClasses];
extern	int8	runtime·size_to_class8[1024/8 + 1];
extern	int8	runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern	void	runtime·InitSizes(void);

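// Illustrative sketch (not part of this header): the two lookup tables
// split the small-size range at 1024 bytes, in 8-byte and 128-byte steps
// respectively, which is why they are sized 1024/8+1 and
// (MaxSmallSize-1024)/128+1.  A size-to-class lookup in the spirit of
// msize.c looks like:
//
//	static int32
//	sizetoclass(int32 size)
//	{
//		if(size <= 1024)
//			return runtime·size_to_class8[(size+7)>>3];
//		return runtime·size_to_class128[(size-1024+127)>>7];
//	}
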
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink *list;
	uint32 nlist;
};

typedef struct StackFreeList StackFreeList;
struct StackFreeList
{
	MLink *list;  // linked list of free stacks
	uintptr size; // total size of stacks in list
};

typedef struct SudoG SudoG;

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
struct MCache
{
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	int32 next_sample;		// trigger heap sample after allocating this many bytes
	intptr local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.goc.
	byte*	tiny;
	uintptr	tinysize;
	uintptr	local_tinyallocs;	// number of tiny allocs not counted in other stats
	// The rest is not accessed on every malloc.
	MSpan*	alloc[NumSizeClasses];	// spans to allocate from

	StackFreeList stackcache[NumStackOrders];

	SudoG*	sudogcache;

	void*	gcworkbuf;

	// Local allocator stats, flushed during GC.
	uintptr local_nlookup;		// number of pointer lookups
	uintptr local_largefree;	// bytes freed for large objects (>MaxSmallSize)
	uintptr local_nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uintptr local_nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};

MSpan*	runtime·MCache_Refill(MCache *c, int32 sizeclass);
void	runtime·MCache_ReleaseAll(MCache *c);
void	runtime·stackcache_clear(MCache *c);
void	runtime·gcworkbuffree(void *b);

enum
{
	KindSpecialFinalizer = 1,
	KindSpecialProfile = 2,
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
};

typedef struct Special Special;
struct Special
{
	Special*	next;	// linked list in span
	uint16		offset;	// span offset of object
	byte		kind;	// kind of Special
};

// The described object has a finalizer set for it.
typedef struct SpecialFinalizer SpecialFinalizer;
struct SpecialFinalizer
{
	Special		special;
	FuncVal*	fn;
	uintptr		nret;
	Type*		fint;
	PtrType*	ot;
};

// The described object is being heap profiled.
typedef struct Bucket Bucket; // from mprof.h
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
	Special	special;
	Bucket*	b;
};

// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0, // allocated for garbage collected heap
	MSpanStack,     // allocated for use by stack allocator
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	pageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC
	uint32	sweepgen;
	uint16	ref;		// capacity - number of objects in freelist
	uint8	sizeclass;	// size class
	bool	incache;	// being used by an MCache
	uint8	state;		// MSpanInUse etc
	uint8	needzero;	// needs to be zeroed before allocation
	uintptr	elemsize;	// computed from sizeclass or from npages
	int64	unusedsince;	// first time spotted by GC in MSpanFree state
	uintptr	npreleased;	// number of pages released to the OS
	byte	*limit;		// end of data in span
	Mutex	specialLock;	// guards specials list
	Special	*specials;	// linked list of special records sorted by offset
};

void	runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages);
void	runtime·MSpan_EnsureSwept(MSpan *span);
bool	runtime·MSpan_Sweep(MSpan *span, bool preserve);

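// Illustrative sketch (not part of this header): how the sweepgen states
// above are typically consumed.  A sweeper claims a span by advancing
// sweepgen atomically from sg-2 to sg-1 (cf. runtime·MSpan_EnsureSwept);
// runtime·cas is the runtime's atomic compare-and-swap:
//
//	uint32 sg;
//
//	sg = runtime·mheap.sweepgen;
//	if(s->sweepgen == sg)
//		;	// already swept this cycle; ready to use
//	else if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1))
//		runtime·MSpan_Sweep(s, false);	// we claimed it; sweep it
//	else
//		;	// another thread is sweeping it; wait for sweepgen == sg
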
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.  We use empty MSpan structures as list heads.
void	runtime·MSpanList_Init(MSpan *list);
bool	runtime·MSpanList_IsEmpty(MSpan *list);
void	runtime·MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime·MSpanList_InsertBack(MSpan *list, MSpan *span);
void	runtime·MSpanList_Remove(MSpan *span);	// from whatever list it is in

// Central list of free objects of a given size.
struct MCentral
{
	Mutex	lock;
	int32	sizeclass;
	MSpan	nonempty;	// list of spans with a free object
	MSpan	empty;		// list of spans with no free objects (or cached in an MCache)
};

void	runtime·MCentral_Init(MCentral *c, int32 sizeclass);
MSpan*	runtime·MCentral_CacheSpan(MCentral *c);
void	runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s);
bool	runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end, bool preserve);

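// Illustrative flow (not part of this header): when an MCache misses,
// MCache_Refill obtains a span via MCentral_CacheSpan, which takes a span
// off the nonempty list and marks it incache; when the cache is done with
// it, MCentral_UncacheSpan returns it to the central lists.  In outline:
//
//	s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass].mcentral);
//	c->alloc[sizeclass] = s;	// allocate from s->freelist until empty
//	...
//	runtime·MCentral_UncacheSpan(&runtime·mheap.central[sizeclass].mcentral, s);
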
// Main malloc heap.
// The heap itself is the "free[]" and "freelarge" lists,
// but all the other global data is here too.
struct MHeap
{
	Mutex	lock;
	MSpan	free[MaxMHeapList];	// free lists of given length
	MSpan	freelarge;		// free lists length >= MaxMHeapList
	MSpan	busy[MaxMHeapList];	// busy lists of large objects of given length
	MSpan	busylarge;		// busy lists of large objects length >= MaxMHeapList
	MSpan	**allspans;		// all spans out there
	MSpan	**gcspans;		// copy of allspans referenced by GC marker or sweeper
	uint32	nspan;
	uint32	nspancap;
	uint32	sweepgen;		// sweep generation, see comment in MSpan
	uint32	sweepdone;		// all spans are swept

	// span lookup
	MSpan**	spans;
	uintptr	spans_mapped;

	// range of addresses we might see in the heap
	byte	*bitmap;
	uintptr	bitmap_mapped;
	byte	*arena_start;
	byte	*arena_used;
	byte	*arena_end;
	bool	arena_reserved;

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	struct MHeapCentral {
		MCentral mcentral;
		byte pad[CacheLineSize];
	} central[NumSizeClasses];

	FixAlloc spanalloc;	// allocator for MSpan*
	FixAlloc cachealloc;	// allocator for MCache*
	FixAlloc specialfinalizeralloc;	// allocator for SpecialFinalizer*
	FixAlloc specialprofilealloc;	// allocator for SpecialProfile*
	Mutex speciallock;	// lock for special record allocators

	// Malloc stats.
	uint64 largefree;	// bytes freed for large objects (>MaxSmallSize)
	uint64 nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uint64 nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};
#define runtime·mheap runtime·mheap_
extern MHeap runtime·mheap;

void	runtime·MHeap_Init(MHeap *h);
MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
MSpan*	runtime·MHeap_AllocStack(MHeap *h, uintptr npage);
void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
void	runtime·MHeap_FreeStack(MHeap *h, MSpan *s);
MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
void*	runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
void	runtime·MHeap_MapBits(MHeap *h);
void	runtime·MHeap_MapSpans(MHeap *h);
void	runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit);

void*	runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32	runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
uintptr	runtime·sweepone(void);
void	runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
void	runtime·unmarkspan(void *v, uintptr size);
void	runtime·purgecachedstats(MCache*);
void	runtime·tracealloc(void*, uintptr, Type*);
void	runtime·tracefree(void*, uintptr);
void	runtime·tracegc(void);

int32	runtime·gcpercent;
int32	runtime·readgogc(void);
void	runtime·clearpools(void);

enum
{
	// flags to malloc
	FlagNoScan	= 1<<0,	// GC doesn't have to scan object
	FlagNoZero	= 1<<1,	// don't zero memory
};

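// For example (illustrative; runtime·mallocgc is declared elsewhere in the
// runtime), a pointer-free allocation whose memory the caller will fully
// overwrite can pass both flags:
//
//	v = runtime·mallocgc(size, nil, FlagNoScan|FlagNoZero);
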
void	runtime·mProf_Malloc(void*, uintptr);
void	runtime·mProf_Free(Bucket*, uintptr, bool);
void	runtime·mProf_GC(void);
void	runtime·iterate_memprof(void (**callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr));
int32	runtime·gcprocs(void);
void	runtime·helpgc(int32 nproc);
void	runtime·gchelper(void);
void	runtime·createfing(void);
G*	runtime·wakefing(void);
void	runtime·getgcmask(byte*, Type*, byte**, uintptr*);

// NOTE: Layout known to queuefinalizer.
typedef struct Finalizer Finalizer;
struct Finalizer
{
	FuncVal	*fn;	// function to call
	void	*arg;	// ptr to object
	uintptr	nret;	// bytes of return values from fn
	Type	*fint;	// type of first argument of fn
	PtrType	*ot;	// type of ptr to object
};

typedef struct FinBlock FinBlock;
struct FinBlock
{
	FinBlock *alllink;
	FinBlock *next;
	int32 cnt;
	int32 cap;
	Finalizer fin[1];
};
extern Mutex	runtime·finlock;	// protects the following variables
extern G*	runtime·fing;
extern bool	runtime·fingwait;
extern bool	runtime·fingwake;
extern FinBlock	*runtime·finq;		// list of finalizers that are to be executed
extern FinBlock	*runtime·finc;		// cache of free blocks

void	runtime·setprofilebucket_m(void);

bool	runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
void	runtime·removefinalizer(void*);
void	runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot);
bool	runtime·freespecial(Special *s, void *p, uintptr size, bool freed);

// Information from the compiler about the layout of stack frames.
struct BitVector
{
	int32 n; // number of bits
	uint8 *bytedata;
};
typedef struct StackMap StackMap;
struct StackMap
{
	int32 n; // number of bitmaps
	int32 nbit; // number of bits in each bitmap
	uint8 bytedata[]; // bitmaps, each starting on a 32-bit boundary
};
// Returns pointer map data for the given stackmap index
// (the index is encoded in PCDATA_StackMapIndex).
BitVector	runtime·stackmapdata(StackMap *stackmap, int32 n);

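// Illustrative sketch (not part of this header): since each bitmap starts
// on a 32-bit boundary, the stride between bitmaps is nbit bits rounded up
// to whole 32-bit words, i.e. ((nbit+31)/32)*4 bytes, so the accessor
// above plausibly reduces to:
//
//	BitVector
//	stackmapdata(StackMap *stackmap, int32 n)
//	{
//		return (BitVector){stackmap->nbit,
//			stackmap->bytedata + n*((stackmap->nbit+31)/32)*4};
//	}
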
extern	BitVector	runtime·gcdatamask;
extern	BitVector	runtime·gcbssmask;

// defined in mgc0.go
void	runtime·gc_m_ptr(Eface*);
void	runtime·gc_g_ptr(Eface*);
void	runtime·gc_itab_ptr(Eface*);

void	runtime·setgcpercent_m(void);

// Value we use to mark dead pointers when GODEBUG=gcdead=1.
#define PoisonGC ((uintptr)0xf969696969696969ULL)
#define PoisonStack ((uintptr)0x6868686868686868ULL)