github.com/rohankumardubey/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/malloc.h

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 60 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-M) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. The spans in the
// page heap are always zeroed. When a span full of objects
// is returned to the page heap, the objects that need to be
// are zeroed first. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
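// As a concrete illustration of steps 1 and 2 of the allocation
// hierarchy above: the fast path is a lock-free pop from the per-M
// free list, with a locked refill from MCentral only when that list
// is empty. The sketch below is illustrative only; Cache and refill
// are hypothetical stand-ins, not the MCache/MCentral declarations
// that appear later in this file, and refill is assumed to abort
// rather than return nil on failure.
//
//	static void*
//	cachealloc(Cache *c, int32 sizeclass)
//	{
//		MLink *v;
//
//		v = c->freelist[sizeclass];
//		if(v == nil)
//			v = refill(c, sizeclass);	// locks MCentral, moves a batch over
//		c->freelist[sizeclass] = v->next;	// pop the head without locking
//		return v;
//	}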
// This C code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).

typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;
typedef struct MTypes	MTypes;
typedef struct GCStats	GCStats;

enum
{
	PageShift	= 12,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	PageID;		// address >> PageShift

enum
{
	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.c produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	NumSizeClasses = 61,

	// Tunable constants.
	MaxSmallSize = 32<<10,

	FixAllocChunk = 16<<10,			// Chunk size for FixAlloc
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,			// Chunk size for heap growth

	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits (see below for reason).
	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
#ifdef _64BIT
#ifdef GOOS_windows
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
	MHeapMap_Bits = 35 - PageShift,
#else
	MHeapMap_Bits = 37 - PageShift,
#endif
#else
	MHeapMap_Bits = 32 - PageShift,
#endif

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 8 cpus.
	MaxGcproc = 8,
};

// Maximum memory allocation size, a hint for callers.
// This must be a #define instead of an enum because it
// is so large.
#ifdef _64BIT
#define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB or 32 GB */
#else
#define	MaxMem	((uintptr)-1)
#endif

// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};
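// Because the blocks on these lists are free memory, the list can be
// threaded through the blocks themselves: the first word of each free
// block is reinterpreted as an MLink. A minimal sketch of the push and
// pop this enables (linkpush and linkpop are illustrative helpers, not
// runtime functions):
//
//	static void
//	linkpush(MLink **listp, void *block)
//	{
//		MLink *l;
//
//		l = (MLink*)block;	// overlay the link on the free block
//		l->next = *listp;
//		*listp = l;
//	}
//
//	static MLink*
//	linkpop(MLink **listp)
//	{
//		MLink *l;
//
//		l = *listp;
//		if(l != nil)
//			*listp = l->next;
//		return l;
//	}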
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable.
//
// SysMap maps previously reserved address space for use.

void*	runtime·SysAlloc(uintptr nbytes, uint64 *stat);
void	runtime·SysFree(void *v, uintptr nbytes, uint64 *stat);
void	runtime·SysUnused(void *v, uintptr nbytes);
void	runtime·SysUsed(void *v, uintptr nbytes);
void	runtime·SysMap(void *v, uintptr nbytes, uint64 *stat);
void*	runtime·SysReserve(void *v, uintptr nbytes);

// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr	size;
	void	(*first)(void *arg, byte *p);	// called first time p is returned
	void*	arg;
	MLink*	list;
	byte*	chunk;
	uint32	nchunk;
	uintptr	inuse;	// in-use bytes now
	uint64*	stat;
};

void	runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
void*	runtime·FixAlloc_Alloc(FixAlloc *f);
void	runtime·FixAlloc_Free(FixAlloc *f, void *p);
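// The fields above are enough to reconstruct the allocation path: pop
// a freed object from list if one is available, otherwise carve the
// next size bytes out of chunk, fetching a fresh FixAllocChunk from
// the operating system when the chunk is exhausted. A sketch under
// those assumptions (the real runtime·FixAlloc_Alloc additionally
// invokes f->first the first time an object is handed out; error
// handling is omitted here):
//
//	static void*
//	fixalloc(FixAlloc *f)
//	{
//		void *v;
//
//		if(f->list != nil) {	// reuse a previously freed object
//			v = f->list;
//			f->list = f->list->next;
//		} else {
//			if(f->nchunk < f->size) {	// current chunk exhausted
//				f->chunk = runtime·SysAlloc(FixAllocChunk, f->stat);
//				f->nchunk = FixAllocChunk;
//			}
//			v = f->chunk;
//			f->chunk += f->size;
//			f->nchunk -= f->size;
//		}
//		f->inuse += f->size;
//		return v;
//	}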
// Statistics.
// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
	// General statistics.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs
	uint64	nfree;		// number of frees

	// Statistics about malloc heap.
	// protected by mheap.Lock
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_released;	// bytes released to the OS
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// bootstrap stacks
	uint64	stacks_sys;
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	buckhash_sys;	// profiling bucket hash table
	uint64	gc_sys;
	uint64	other_sys;

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	last_gc;	// last GC (in absolute time)
	uint64	pause_total_ns;
	uint64	pause_ns[256];
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	struct {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];
};

#define mstats runtime·memStats	/* name shared with Go */
extern MStats mstats;

// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//	1 <= sizeclass < NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//	making new objects in class i

int32	runtime·SizeToClass(int32);
extern	int32	runtime·class_to_size[NumSizeClasses];
extern	int32	runtime·class_to_allocnpages[NumSizeClasses];
extern	int8	runtime·size_to_class8[1024/8 + 1];
extern	int8	runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern	void	runtime·InitSizes(void);

// Per-thread (in Go, per-M) cache for small objects.
// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink	*list;
	uint32	nlist;
};

struct MCache
{
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	int32	next_sample;		// trigger heap sample after allocating this many bytes
	intptr	local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
	// The rest is not accessed on every malloc.
	MCacheList list[NumSizeClasses];
	// Local allocator stats, flushed during GC.
	uintptr	local_nlookup;		// number of pointer lookups
	uintptr	local_largefree;	// bytes freed for large objects (>MaxSmallSize)
	uintptr	local_nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uintptr	local_nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};

void	runtime·MCache_Refill(MCache *c, int32 sizeclass);
void	runtime·MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
void	runtime·MCache_ReleaseAll(MCache *c);

// MTypes describes the types of blocks allocated within a span.
// The compression field describes the layout of the data.
//
// MTypes_Empty:
//	All blocks are free, or no type information is available for
//	allocated blocks.
//	The data field has no meaning.
// MTypes_Single:
//	The span contains just one block.
//	The data field holds the type information.
// MTypes_Words:
//	The span contains multiple blocks.
//	The data field points to an array of type [NumBlocks]uintptr,
//	and each element of the array holds the type of the corresponding
//	block.
// MTypes_Bytes:
//	The span contains at most seven different types of blocks.
//	The data field points to the following structure:
//	struct {
//		type  [8]uintptr	// type[0] is always 0
//		index [NumBlocks]byte
//	}
//	The type of the i-th block is: data.type[data.index[i]]
enum
{
	MTypes_Empty = 0,
	MTypes_Single = 1,
	MTypes_Words = 2,
	MTypes_Bytes = 3,
};
struct MTypes
{
	byte	compression;	// one of MTypes_*
	uintptr	data;
};
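// Reading the type of the i-th block in a span is then a switch on the
// compression field; only MTypes_Bytes needs the extra indirection
// through the index table. A sketch (blocktype is an illustrative
// helper, not a runtime function; the MTypes_Bytes case follows the
// struct layout described above):
//
//	static uintptr
//	blocktype(MTypes *t, uintptr i)
//	{
//		byte *data;
//
//		switch(t->compression) {
//		case MTypes_Single:
//			return t->data;			// the one block's type, stored inline
//		case MTypes_Words:
//			return ((uintptr*)t->data)[i];	// one word per block
//		case MTypes_Bytes:
//			data = (byte*)t->data;
//			// data.type[data.index[i]]: 8 type words first, then index bytes
//			return ((uintptr*)data)[data[8*sizeof(uintptr) + i]];
//		}
//		return 0;	// MTypes_Empty: no information recorded
//	}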
// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0,
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	PageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	uint32	ref;		// number of allocated objects in this span
	int32	sizeclass;	// size class
	uintptr	elemsize;	// computed from sizeclass or from npages
	uint32	state;		// MSpanInUse etc
	int64	unusedsince;	// First time spotted by GC in MSpanFree state
	uintptr	npreleased;	// number of pages released to the OS
	byte	*limit;		// end of data in span
	MTypes	types;		// types of allocated objects in this span
};

void	runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads
// (see the sketch after the MCentral declarations below).
void	runtime·MSpanList_Init(MSpan *list);
bool	runtime·MSpanList_IsEmpty(MSpan *list);
void	runtime·MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime·MSpanList_Remove(MSpan *span);	// from whatever list it is in

// Central list of free objects of a given size.
struct MCentral
{
	Lock;
	int32	sizeclass;
	MSpan	nonempty;
	MSpan	empty;
	int32	nfree;
};

void	runtime·MCentral_Init(MCentral *c, int32 sizeclass);
int32	runtime·MCentral_AllocList(MCentral *c, MLink **first);
void	runtime·MCentral_FreeList(MCentral *c, MLink *first);
void	runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
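// Keeping an empty MSpan as the list head (as in the nonempty and empty
// fields of MCentral above) means a span on a list always has non-nil
// next and prev pointers, so insertion and removal need no nil checks.
// A sketch of the insert this enables (mirroring what
// runtime·MSpanList_Insert must do, minus its sanity checks):
//
//	static void
//	spanlistinsert(MSpan *list, MSpan *span)
//	{
//		span->next = list->next;
//		span->prev = list;
//		span->next->prev = span;	// safe even when the list was empty,
//		span->prev->next = span;	// because the head is a real MSpan
//	}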
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
	Lock;
	MSpan	free[MaxMHeapList];	// free lists of given length
	MSpan	large;			// free lists length >= MaxMHeapList
	MSpan	**allspans;
	uint32	nspan;
	uint32	nspancap;

	// span lookup
	MSpan**	spans;
	uintptr	spans_mapped;

	// range of addresses we might see in the heap
	byte	*bitmap;
	uintptr	bitmap_mapped;
	byte	*arena_start;
	byte	*arena_used;
	byte	*arena_end;

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
	// gets its own cache line.
	struct {
		MCentral;
		byte pad[CacheLineSize];
	} central[NumSizeClasses];

	FixAlloc spanalloc;	// allocator for Span*
	FixAlloc cachealloc;	// allocator for MCache*

	// Malloc stats.
	uint64	largefree;	// bytes freed for large objects (>MaxSmallSize)
	uint64	nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uint64	nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};
extern MHeap runtime·mheap;

void	runtime·MHeap_Init(MHeap *h);
MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed);
void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
void	runtime·MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
void*	runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
void	runtime·MHeap_MapBits(MHeap *h);
void	runtime·MHeap_MapSpans(MHeap *h);
void	runtime·MHeap_Scavenger(void);

void*	runtime·mallocgc(uintptr size, uintptr typ, uint32 flag);
void*	runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32	runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void	runtime·gc(int32 force);
void	runtime·markallocated(void *v, uintptr n, bool noptr);
void	runtime·checkallocated(void *v, uintptr n);
void	runtime·markfreed(void *v, uintptr n);
void	runtime·checkfreed(void *v, uintptr n);
extern	int32	runtime·checking;
void	runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
void	runtime·unmarkspan(void *v, uintptr size);
bool	runtime·blockspecial(void*);
void	runtime·setblockspecial(void*, bool);
void	runtime·purgecachedstats(MCache*);
void*	runtime·cnew(Type*);
void*	runtime·cnewarray(Type*, intgo);

void	runtime·settype_flush(M*);
void	runtime·settype_sysfree(MSpan*);
uintptr	runtime·gettype(void*);

enum
{
	// flags to malloc
	FlagNoScan	= 1<<0,	// GC doesn't have to scan object
	FlagNoProfiling	= 1<<1,	// must not profile
	FlagNoGC	= 1<<2,	// must not free or scan for pointers
	FlagNoZero	= 1<<3,	// don't zero memory
	FlagNoInvokeGC	= 1<<4,	// don't invoke GC
};

void	runtime·MProf_Malloc(void*, uintptr);
void	runtime·MProf_Free(void*, uintptr);
void	runtime·MProf_GC(void);
int32	runtime·gcprocs(void);
void	runtime·helpgc(int32 nproc);
void	runtime·gchelper(void);

void	runtime·walkfintab(void (*fn)(void*));

enum
{
	TypeInfo_SingleObject = 0,
	TypeInfo_Array = 1,
	TypeInfo_Chan = 2,

	// Enables type information at the end of blocks allocated from heap
	DebugTypeAtBlockEnd = 0,
};

// defined in mgc0.go
void	runtime·gc_m_ptr(Eface*);
void	runtime·gc_itab_ptr(Eface*);

void	runtime·memorydump(void);
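// Tying the pieces together: the spans array in MHeap maps each page of
// the arena to the MSpan that owns it, which is how a heap pointer can
// be resolved to its span (and from there to its size class and type
// information). A sketch of that lookup (spanof is an illustrative
// helper, not a runtime function; compare runtime·MHeap_Lookup):
//
//	static MSpan*
//	spanof(MHeap *h, void *v)
//	{
//		uintptr p;
//
//		p = (uintptr)v - (uintptr)h->arena_start;	// byte offset into the arena
//		return h->spans[p >> PageShift];		// one MSpan* per page
//	}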