github.com/epfl-dcsl/gotee@v0.0.0-20200909122901-014b35f5e5e9/src/runtime/malloc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan
//	   is now "idle", so it is returned to the mheap and no longer
//	   has a size class.
//	   This may coalesce it with adjacent idle mspans.
//
//	4. If an mspan remains idle for long enough, return its pages
//	   to the operating system.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// Free object slots in an mspan are zeroed only if mspan.needzero is
// false. If needzero is true, objects are zeroed as they are
// allocated. There are various benefits to delaying zeroing this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
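
// For illustration only (not part of the original file): the fast path in
// step 1 above reduces to a bitmap scan over the mspan cached in this P's
// mcache. A minimal sketch, with hypothetical names:
//
//	span := thisPsMcache.alloc[spanClass]
//	bit := ctz64(span.allocCache) // lowest set bit = next free slot
//	if bit < 64 {
//		obj = span.base() + (span.freeindex+uintptr(bit))*span.elemsize
//	}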

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by the page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// The only exception is mips32, which only has access to the low 2GB of virtual memory.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	//TODO aghosn fix that for enclave.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	// _MaxMem is the maximum heap arena size minus 1.
	//
	// On 32-bit, this is also the maximum heap pointer value,
	// since the arena starts at address 0.
	_MaxMem = 1<<_MHeapMap_TotalBits - 1

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)
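
// For illustration only (not part of the original file): on linux/amd64,
// ^uintptr(0)>>63 == 1, so _64bit == 1<<1/2 == 1. With GoosWindows,
// GoosDarwin, and GoarchArm64 all 0, _MHeapMap_TotalBits evaluates to
// (1*0)*35 + (1*1*1)*39 + 0 + 0 == 39, so _MaxMem == 1<<39 - 1
// (512 GB - 1) and _MHeapMap_Bits == 39 - _PageShift == 39 - 13 == 26.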

//TODO aghosn check this.
// We redefine our own _MaxMemEncl and _MHeapMap_BitsEncl and replace them
// in the code. The original values are not used often, so this should be
// feasible. They can be set in runtime.osinit (the first step of the
// bootloading sequence).
var (
	_MHeapMap_TotalBitsEncl uintptr = 0
	_MHeapMap_BitsEncl      uintptr = 0
	_MaxMemEncl             uintptr = 0
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns it unconditionally; this is only used if
// an out-of-memory error has been detected midway through
// an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
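
// For illustration only (not part of the original file): the usual calling
// pattern for these helpers, as used by mallocinit and sysAlloc below, is
//
//	var reserved bool
//	p := sysReserve(hint, size, &reserved) // claim address space only
//	sysMap(p, size, reserved, &stat)       // back it with real memory
//
// with sysFree used to hand memory back on out-of-memory failure paths.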

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}

	// The auxiliary regions start at p and are laid out in the
	// following order: spans, bitmap, arena.
	var p, pSize uintptr
	var reserved bool

	var localMaxMem uintptr = _MaxMem
	if isEnclave {
		localMaxMem = _MaxMemEncl
	}

	// The spans array holds one *mspan per _PageSize of arena.
	var spansSize uintptr = (localMaxMem + 1) / _PageSize * sys.PtrSize
	spansSize = round(spansSize, _PageSize)
	// The bitmap holds 2 bits per word of arena.
	var bitmapSize uintptr = (localMaxMem + 1) / (sys.PtrSize * 8 / 2)
	bitmapSize = round(bitmapSize, _PageSize)

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.
	if sys.PtrSize == 8 {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails, we fall back to the 32-bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(localMaxMem, _PageSize)
		pSize = bitmapSize + spansSize + arenaSize + _PageSize
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case isEnclave == true:
				// The value reserved by SGX.
				p = Cooprt.eHeap
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			// TODO @aghosn: here we try to allocate at 0xc0..., but it does not
			// work, so we move on to 0x1c0..., which should not fail. We interpose in
			// SysReserve to get the value that we want and relocate the alloc.
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}
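
	// For illustration only (not part of the original file): with the default
	// case above, iteration i == 0 asks for 0x00c0 << 32 == 0x00c000000000;
	// if that hint is rejected, the loop retries at 0x01c000000000,
	// 0x02c000000000, and so on, up to 0x7fc000000000.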

	if p == 0 {
		if isEnclave {
			throw("mallocinit assumption about address space failed!")
		}
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// arena over it (which will cause the kernel to put
		// the arena somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range arenaSizes {
			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			if p <= procBrk && procBrk < p+pSize {
				// Move the start above the brk,
				// leaving some room for future brk
				// expansion.
				p = round(procBrk+(1<<20), 1<<20)
			}
			//TODO @aghosn: this call is not reached in the default case
			// because we did not fail above, so p != 0.
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)
	pSize -= p1 - p

	spansStart := p1
	p1 += spansSize
	mheap_.bitmap = p1 + bitmapSize
	p1 += bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1
	mheap_.arena_alloc = p1
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	//if isEnclave {
	//	print("mheap_.arena_end ", hex(mheap_.arena_end), "\n")
	//	print("mheap_.arena_used ", hex(mheap_.arena_used), "\n")
	//	print("mheap_.arena_alloc ", hex(mheap_.arena_alloc), "\n")
	//	print("mheap_.arena_reserved ", mheap_.arena_reserved, "\n")
	//}

	// Initialize the rest of the allocator.
	mheap_.init(spansStart, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
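
// For illustration only (not part of the original file): after mallocinit,
// the 64-bit reservation is laid out as
//
//	p1 (p rounded up to _PageSize)
//	|--- spans array ---|--- heap bitmap ---|--- arena ----------|
//	spansStart                              arena_start  arena_end
//
// where mheap_.bitmap holds the *end* address of the bitmap region,
// because the bitmap is indexed downward from there.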

// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	// strandLimit is the maximum number of bytes to strand from
	// the current arena block. If we would need to strand more
	// than this, we fall back to sysAlloc'ing just enough for
	// this allocation.
	const strandLimit = 16 << 20

	var localMaxMem uintptr = _MaxMem
	if isEnclave {
		localMaxMem = _MaxMemEncl
	}

	if n > h.arena_end-h.arena_alloc {
		// If we haven't grown the arena to _MaxMem yet, try
		// to reserve some more address space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= localMaxMem {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				// TODO: Try smaller reservation
				// growths in case we're in a crowded
				// 32-bit address space.
				goto reservationFailed
			}
			// p can be just about anywhere in the address
			// space, including before arena_end.
			if p == h.arena_end {
				// The new block is contiguous with
				// the current block. Extend the
				// current arena block.
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= localMaxMem && h.arena_end-h.arena_alloc < strandLimit {
				// We were able to reserve more memory
				// within the arena space, but it's
				// not contiguous with our previous
				// reservation. It could be before or
				// after our current arena_used.
				//
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				p = round(p, _PageSize)
				h.arena_alloc = p
				h.arena_reserved = reserved
			} else {
				// We got a mapping, but either
				//
				// 1) It's not in the arena, so we
				// can't use it. (This should never
				// happen on 32-bit.)
				//
				// 2) We would need to discard too
				// much of our current arena block to
				// use it.
				//
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				//
				// We'll fall back to a small sysAlloc.
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_alloc {
		// Keep taking from our reservation.
		p := h.arena_alloc
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.arena_alloc += n
		if h.arena_alloc > h.arena_used {
			h.setArenaUsed(h.arena_alloc, true)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

reservationFailed:
	// If using 64-bit, our reservation is all we have.
	if sys.PtrSize != 4 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > localMaxMem {
		// This shouldn't be possible because _MaxMem is the
		// whole address space on 32-bit.
		top := uint64(h.arena_start) + uint64(localMaxMem)
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		h.setArenaUsed(p+n, true)
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}
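
// For illustration only (not part of the original file): reservation growth
// happens in large steps. Even a 1-byte shortfall reserves
// round(1+_PageSize, 256<<20) == 256 MB of fresh address space. Likewise,
// `p += -p & (_PageSize - 1)` above is the standard trick for rounding p
// up to the next _PageSize boundary.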

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}
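
// For illustration only (not part of the original file): allocCache holds
// the allocation bitmap starting at freeindex, with 1 meaning "free". If
// allocCache == 0b...10100, Ctz64 returns 2, so the slot at freeindex+2 is
// the next free object; the cache is then shifted right by theBit+1 == 3
// so the scan resumes just past the slot that was handed out.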

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(spc)
		})
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}
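
// For illustration only (not part of the original file): mallocgc below
// combines the two helpers; the pattern for one size class is roughly
//
//	span := c.alloc[spc]
//	v := nextFreeFast(span) // lock-free bitmap hit
//	if v == 0 {
//		v, span, shouldhelpgc = c.nextFree(spc) // may refill from mcentral
//	}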

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	if c == nil {
		gp := getg()
		println("The g we got ", gp, " with its m ", gp.m, "and mcache ", gp.m.mcache, " gp.m.p", gp.m.p)
		panic("malloc has no access to mcache")
	} /*else if isEnclave {
		gp := getg()
		println("c ok, g ", gp, " with m ", gp.m, " mcache", gp.m.mcache)
	}*/
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span := c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero, noscan)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if !noscan {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(gcBackgroundMode, t)
		}
	}

	return x
}
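
// For illustration only (not part of the original file): two successive
// noscan allocations of 4 and 12 bytes can share one 16-byte tiny block.
// The first takes bytes 0..3 (tinyoffset becomes 4); the second is already
// 4-byte aligned, takes bytes 4..15, and fills the block exactly, so no
// memory is wasted in this case.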

func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.size, typ, true)
	}
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}
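
// For illustration only (not part of the original file): the compiler lowers
// `p := new(T)` to a newobject call with T's type descriptor, and
// reflect.New reaches the same path through the go:linkname directives
// above, so ordinary and reflective allocations both funnel into mallocgc.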

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution with mean MemProfileRate, so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return fastexprand(MemProfileRate)
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability distribution function is (1/mean)*exp(-x/mean), so the
	// CDF is
	// p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(fastrand() % uint32(2*rate))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
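
// For illustration only (not part of the original file): a typical runtime
// caller looks like
//
//	buf := persistentalloc(size, sys.PtrSize, &memstats.other_sys)
//
// and gets back zeroed, pointer-aligned memory that is never freed and is
// invisible to the garbage collector.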

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory (malloc)")
		}
		persistent.off = 0
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types marked as go:notinheap,
// but this serves as a generic type for situations where that isn't
// possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
//
//go:notinheap
type notInHeap struct{}

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
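
// persistentBumpExample is an illustrative sketch (not part of the original
// file) of the bump allocation performed by persistentalloc1: round the
// offset up to the alignment, hand out the next size bytes of the current
// chunk via notInHeap.add, and advance the offset. The real function also
// handles chunk refills, locking, and stat accounting.
func persistentBumpExample(pa *persistentAlloc, size, align uintptr) *notInHeap {
	pa.off = round(pa.off, align) // align the bump pointer
	p := pa.base.add(pa.off)      // pointer arithmetic outside the GC'd heap
	pa.off += size
	return p
}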