github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/malloc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
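
// For illustration only (not part of the runtime): step 1 of the small-object
// path above is a table lookup. The sketch below is a standalone program that
// rounds a request up to a size class; classSizes lists only a few of the real
// classes (the full table is built in msize.go and has _NumSizeClasses
// entries), and the runtime itself uses the size_to_class8/size_to_class128
// index tables in mallocgc rather than a scan.
//
//	package main
//
//	import "fmt"
//
//	// A few of the small size classes, for illustration.
//	var classSizes = []uintptr{8, 16, 32, 48, 64, 80, 96, 112, 128}
//
//	// roundToClass rounds a request up to the next size class. It has the
//	// same effect as mallocgc's size_to_class8/class_to_size lookup for
//	// these sizes, but uses a linear scan for clarity.
//	func roundToClass(size uintptr) uintptr {
//		for _, cs := range classSizes {
//			if size <= cs {
//				return cs
//			}
//		}
//		return size // beyond this illustrative table
//	}
//
//	func main() {
//		for _, size := range []uintptr{1, 9, 33, 100} {
//			fmt.Printf("request %3d bytes -> class size %3d\n", size, roundToClass(size))
//		}
//	}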

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

const (
	_PageShift = 13
	_PageSize  = 1 << _PageShift
	_PageMask  = _PageSize - 1
)

const (
	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	_NumSizeClasses = 67

	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32
)

const _MaxArena32 = 1<<32 - 1

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally; this is
// only used if an out-of-memory error has been detected midway through
// an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.

func mallocinit() {
	initSizes()

	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 2 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 528 GB (because the bitmap ends up being 16 GB),
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 2)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = (_MaxArena32 + 1) / (sys.PtrSize * 8 / 2)
			spansSize = (_MaxArena32 + 1) / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize + bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1 + (spansSize + bitmapSize)
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1 + (spansSize + bitmapSize)
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}

// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))
				h.mapBits(used)
				h.mapSpans(used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start > _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > _MaxArena32 {
		top := ^uintptr(0)
		if top-h.arena_start-1 > _MaxArena32 {
			top = h.arena_start + _MaxArena32 + 1
		}
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= (theBit + 1)
			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())
			s.allocCount++
			return v
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[sizeclass]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which corresponds
			// to 2x worst-case memory wastage (when all but one of the
			// subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case win is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			// (An illustrative sketch of the bump-and-align logic below
			// appears in a comment at the end of this file.)
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size >= 8 {
				off = round(off, 8)
			} else if size >= 4 {
				off = round(off, 4)
			} else if size >= 2 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySizeClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			span := c.alloc[sizeclass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclr(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if noscan {
		heapBitsSetTypeNoScan(uintptr(x))
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period	; Using log_2 for efficiency
	// (A worked instance of this computation appears in a comment at the
	// end of this file.)
	const randomBitCount = 26
	q := fastrand1()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand1()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}

	if persistent.base != nil && size > 1 {
		off := persistent.off
		if size >= 8 {
			off = round(off, 8)
		} else if size >= 4 {
			off = round(off, 4)
		} else if size >= 2 {
			off = round(off, 2)
		}
		persistent.off = off
	}

	if persistent.base == nil || persistent.off+size > chunk {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
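
// Illustrative sketch (not part of the runtime): the tiny-allocator
// bump-and-align logic that mallocgc applies above, extracted into a
// standalone program. tinyBlock, roundUp, and fit are hypothetical names
// standing in for the mcache fields (tiny, tinyoffset) and the round helper;
// only the alignment ladder and the fit check mirror the real code, and the
// real allocator additionally requires c.tiny != 0 before reusing a block.
//
//	package main
//
//	import "fmt"
//
//	const maxTinySize = 16 // same value as _TinySize in this file
//
//	// tinyBlock models the per-P tiny block: the number of bytes already
//	// handed out of the current 16-byte region.
//	type tinyBlock struct {
//		off uintptr
//	}
//
//	// roundUp rounds x up to a multiple of n (a power of two), like the
//	// runtime's round helper.
//	func roundUp(x, n uintptr) uintptr {
//		return (x + n - 1) &^ (n - 1)
//	}
//
//	// fit reports whether a noscan object of the given size can be packed
//	// into the current block, and the aligned offset it would occupy.
//	// The 8/4/2 alignment ladder mirrors mallocgc.
//	func (b *tinyBlock) fit(size uintptr) (off uintptr, ok bool) {
//		off = b.off
//		switch {
//		case size >= 8:
//			off = roundUp(off, 8)
//		case size >= 4:
//			off = roundUp(off, 4)
//		case size >= 2:
//			off = roundUp(off, 2)
//		}
//		return off, off+size <= maxTinySize
//	}
//
//	func main() {
//		b := &tinyBlock{}
//		for _, size := range []uintptr{1, 4, 3, 8} {
//			if off, ok := b.fit(size); ok {
//				fmt.Printf("size %d placed at offset %d\n", size, off)
//				b.off = off + size
//			} else {
//				fmt.Printf("size %d needs a fresh %d-byte block\n", size, maxTinySize)
//			}
//		}
//	}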
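
// Illustrative sketch (not part of the runtime): a worked instance of the
// nextSample derivation above, using the standard math package in place of
// the runtime-internal fastlog2 and fastrand1. The value of q is fixed here
// for the sake of the example (in the runtime q is drawn uniformly from
// (0, 1<<26]); period is set to 512 KiB, the default MemProfileRate.
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		const randomBitCount = 26
//		const period = 512 * 1024 // default MemProfileRate
//
//		// Take q = 1<<25, so q/2^26 = 0.5 and qlog = log2(0.5) = -1.
//		q := float64(uint64(1) << 25)
//		qlog := math.Log2(q) - randomBitCount
//		if qlog > 0 {
//			qlog = 0
//		}
//		const minusLog2 = -0.6931471805599453 // -ln(2)
//
//		// x = -ln(q/2^26) * period; for q/2^26 = 0.5 this is ln(2)*period,
//		// i.e. about 363409 bytes until the next sampled allocation.
//		x := int32(qlog*(minusLog2*period)) + 1
//		fmt.Println(x) // 363409
//	}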