// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan
//	   is now "idle", so it is returned to the mheap and no longer
//	   has a size class.
//	   This may coalesce it with adjacent idle mspans.
//
//	4. If an mspan remains idle for long enough, return its pages
//	   to the operating system.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// Free object slots in an mspan are zeroed only if mspan.needzero is
// false. If needzero is true, objects are zeroed as they are
// allocated. There are various benefits to delaying zeroing this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
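
// The small-object fast path described above can be pictured as the following
// simplified sketch. It is illustrative only and not part of the runtime:
// lookupClass is a placeholder for the size_to_class8/size_to_class128 lookup
// done in mallocgc below, and all GC assist, zeroing, and tiny-allocator
// details are omitted.
//
//	func allocSmall(c *mcache, size uintptr) unsafe.Pointer {
//		class := lookupClass(size) // 1. round the size up to a size class (placeholder)
//		span := c.alloc[class]     //    per-P cached span; no lock needed
//		if v := nextFreeFast(span); v != 0 {
//			return unsafe.Pointer(v) // free slot found in the span's bitmap cache
//		}
//		v, _, _ := c.nextFree(class) // 2./3. refill from mcentral (and, if needed, mheap)
//		return unsafe.Pointer(v)
//	}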

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by the page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// The only exception is mips32, which only has access to the low 2GB of virtual memory.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	// _MaxMem is the maximum heap arena size minus 1.
	//
	// On 32-bit, this is also the maximum heap pointer value,
	// since the arena starts at address 0.
	_MaxMem = 1<<_MHeapMap_TotalBits - 1

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)
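
// To make the arena sizing above concrete, here is how _MHeapMap_TotalBits
// works out on a few common targets (worked directly from the formula above;
// illustrative only):
//
//	linux/amd64:   (1*0)*35 + (1*1*1)*39 + 0  + 0        = 39 bits -> _MaxMem+1 = 512 GB
//	windows/amd64: (1*1)*35 + 0          + 0  + 0        = 35 bits -> 32 GB
//	darwin/arm64:  0        + 0          + 31 + 0        = 31 bits -> 2 GB
//	linux/386:     0        + 0          + 0  + 1*(32-0) = 32 bits -> the full 4 GB address space
//
// With 8192-byte pages (_PageShift = 13), _MHeapMap_Bits on linux/amd64 is
// therefore 39 - 13 = 26.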

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected midway
// through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}

	// The auxiliary regions start at p and are laid out in the
	// following order: spans, bitmap, arena.
	var p, pSize uintptr
	var reserved bool

	// The spans array holds one *mspan per _PageSize of arena.
	var spansSize uintptr = (_MaxMem + 1) / _PageSize * sys.PtrSize
	spansSize = round(spansSize, _PageSize)
	// The bitmap holds 2 bits per word of arena.
	var bitmapSize uintptr = (_MaxMem + 1) / (sys.PtrSize * 8 / 2)
	bitmapSize = round(bitmapSize, _PageSize)
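
	// For example (illustrative arithmetic only), with the 512 GB maximum
	// arena of a typical 64-bit target (_MHeapMap_TotalBits = 39, 8-byte
	// pointers, 8192-byte pages), and with round(n, a) rounding n up to a
	// multiple of the power-of-two a:
	//
	//	spansSize  = 2^39 / 8192 * 8       = 512 MB (one *mspan pointer per page)
	//	bitmapSize = 2^39 / (8*8/2) = 2^34 = 16 GB  (2 bits per heap word)
	//
	// These regions are only reserved here; they are mapped in lazily as the
	// heap grows.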

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.
	if sys.PtrSize == 8 {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve somewhat more than 512 GB (the bitmap ends up
		// being 16 GB and the spans array 512 MB), but it hardly matters.
		//
		// If this fails we fall back to the 32-bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		pSize = bitmapSize + spansSize + arenaSize + _PageSize
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// arena over it (which will cause the kernel to put
		// the arena somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range arenaSizes {
			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			if p <= procBrk && procBrk < p+pSize {
				// Move the start above the brk,
				// leaving some room for future brk
				// expansion.
				p = round(procBrk+(1<<20), 1<<20)
			}
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)
	pSize -= p1 - p

	spansStart := p1
	p1 += spansSize
	mheap_.bitmap = p1 + bitmapSize
	p1 += bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansStart, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}

// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// If we haven't grown the arena to _MaxMem yet, try
		// to reserve some more address space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxMem {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				// The new reservation is contiguous
				// with the old reservation.
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem {
				// We were able to reserve more memory
				// within the arena space, but it's
				// not contiguous with our previous
				// reservation. Skip over the unused
				// address space.
				//
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))
				h.setArenaUsed(used, false)
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.setArenaUsed(p+n, true)

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if sys.PtrSize != 4 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > _MaxMem {
		// This shouldn't be possible because _MaxMem is the
		// whole address space on 32-bit.
		top := uint64(h.arena_start) + _MaxMem
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		h.setArenaUsed(p+n, true)
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())
			s.allocCount++
			return v
		}
	}
	return 0
}
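
// A worked example of the allocCache bit trick above (illustrative only):
// allocCache caches the span's allocation bitmap starting at freeindex,
// inverted so that a 1 bit means "free", with bit 0 corresponding to
// freeindex itself. If, say, s.freeindex = 5 and s.allocCache = ...0001100b,
// then sys.Ctz64(s.allocCache) = 2, so the next free object is number
// 5+2 = 7 in the span and its address is s.base() + 7*s.elemsize. After
// handing it out, the cache is shifted right by theBit+1 = 3 so that bit 0
// again lines up with the new freeindex of 8.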

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavyweight
// allocation. If it is a heavyweight allocation the caller must determine
// whether a new GC cycle needs to be started or, if the GC is active, whether
// this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[sizeclass]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one of the subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
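			//
			// As a worked example of the packing below (illustrative only):
			// three successive noscan allocations of 12, 2, and 1 bytes can
			// share one 16-byte tiny block. The 12-byte object is placed at
			// offset 0 (tinyoffset becomes 12), the 2-byte object needs
			// 2-byte alignment and 12 is already a multiple of 2, so it goes
			// at offset 12 (tinyoffset becomes 14), and the 1-byte object
			// goes at offset 14 (tinyoffset becomes 15). A subsequent 4-byte
			// request would need offset round(15, 4) = 16, so it does not
			// fit and starts a new block.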
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySizeClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
			span := c.alloc[sizeclass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if noscan {
		heapBitsSetTypeNoScan(uintptr(x))
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(gcBackgroundMode, t)
		}
	}

	return x
}

func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}

// implementation of the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}
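
// For reference, the sampling performed by nextSample below is the textbook
// inverse-CDF draw from an exponential distribution with mean MemProfileRate.
// The sketch here is illustrative only: the runtime cannot use math or
// math/rand, so nextSample uses fastrand and fastlog2 instead of math.Log.
//
//	import (
//		"math"
//		"math/rand"
//	)
//
//	// sampleInterval returns the number of bytes to allocate before the
//	// next heap profile sample, with mean period.
//	func sampleInterval(period int32) int32 {
//		u := 1 - rand.Float64() // uniform in (0, 1]
//		return int32(-float64(period) * math.Log(u))
//	}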

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// Make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(fastrand() % uint32(2*rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
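
// The chunked bump allocation done by persistentalloc1 above follows a common
// pattern that can be sketched in isolation roughly as below (illustrative
// only; the real version also handles per-P state, locking, large blocks, and
// stat accounting):
//
//	type bump struct {
//		base unsafe.Pointer
//		off  uintptr
//	}
//
//	func (b *bump) alloc(size, align, chunk uintptr) unsafe.Pointer {
//		b.off = (b.off + align - 1) &^ (align - 1) // round the offset up to the alignment
//		if b.base == nil || b.off+size > chunk {
//			b.base = sysAlloc(chunk, &memstats.other_sys) // start a fresh chunk
//			b.off = 0
//		}
//		p := add(b.base, b.off)
//		b.off += size
//		return p
//	}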