// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. Stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. The cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
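//
// The lookup order for a small allocation can be sketched as follows.
// This is an illustrative outline only, not part of the runtime; the
// helper names (allocSmallSketch, sizeToClass, popFreeObject,
// refillFromCentral) are hypothetical stand-ins for the real code paths
// in mallocgc and mcache.refill:
//
//	func allocSmallSketch(c *mcache, size uintptr) unsafe.Pointer {
//		class := sizeToClass(size)       // 1. round up to a size class
//		if v := popFreeObject(c, class); v != nil {
//			return v                 // fast path, no locks taken
//		}
//		refillFromCentral(c, class)      // 2. bulk refill from MCentral
//		// 3. MCentral refills from MHeap spans as needed, and
//		// 4. MHeap grows by at least 1MB at a time from the OS.
//		return popFreeObject(c, class)
//	}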

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

const (
	_PageShift = 13
	_PageSize  = 1 << _PageShift
	_PageMask  = _PageSize - 1
)

const (
	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	_NumSizeClasses = 67

	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per-order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32
)
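// Illustrative arithmetic for _MHeapMap_TotalBits (not part of the build;
// it just evaluates the 0/1 selector constants above for a few targets):
//
//	linux/amd64:   1*0*35 + 1*1*1*39 + 0*31 + 0*32 = 39 bits (_MaxMem just under 512 GB)
//	windows/amd64: 1*1*35 + 0        + 0    + 0    = 35 bits (32 GB)
//	darwin/arm64:  0      + 0        + 1*31 + 0    = 31 bits (2 GB)
//	any 32-bit:    0      + 0        + 0    + 1*32 = 32 bits (full address space)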

// Page number (address>>pageShift)
type pageID uintptr

const _MaxArena32 = 2 << 30

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.

func mallocinit() {
	initSizes()

	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
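		// Illustrative arithmetic for the sizes computed just below,
		// assuming sys.PtrSize == 8 and a 512 GB arena: the bitmap needs
		// 4 bits per 8-byte word, so bitmapSize = 512 GB / 16 = 32 GB, and
		// the spans array needs one pointer per 8 KB page, so
		// spansSize = 512 GB / 8192 * 8 = 512 MB, giving a total
		// reservation of roughly 544.5 GB of address space.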
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve a 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (sys.PtrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)
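			// Illustrative arithmetic, assuming a 32-bit build
			// (sys.PtrSize == 4) and no limit: bitmapSize = 2 GB / 8 = 256 MB
			// and spansSize = 2 GB / 8192 * 4 = 1 MB, so the reservation
			// is dominated by the bitmap plus the chosen arenaSize.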

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}

// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if sys.PtrSize == 4 {
		return sysReserve(nil, n, reserved)
	}

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		*reserved = false
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
		if p != 0 {
			return unsafe.Pointer(p)
		}
	}

	return sysReserve(nil, n, reserved)
}

func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-uintptr(p) & (_PageSize - 1))
				h.mapBits(used)
				h.mapSpans(used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
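		// sysMap commits the next n bytes of the reservation, and
		// mapBits/mapSpans grow the heap bitmap and spans array to
		// cover the newly used region before arena_used is advanced
		// past it.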
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if uintptr(p)&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start >= _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if uintptr(p)+n > h.arena_used {
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if uintptr(p)&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

const (
	// flags to malloc
	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
	_FlagNoZero = 1 << 1 // don't zero memory
)

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which gives at
			// most 2x worst case memory wastage (when all but one of the
			// subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case win is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into the existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					c.refill(tinySizeClass)
				})
				shouldhelpgc = true
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
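			// c.tinyoffset counts the bytes already used in the current
			// block and size is all that is used in the new block, so
			// the block with the smaller value has more room left. For
			// example (illustrative numbers only): if the old block has
			// 12 of its 16 bytes used and the new allocation is 8 bytes,
			// 8 < 12 and the new block is kept for future tiny allocations.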
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					c.refill(int32(sizeclass))
				})
				shouldhelpgc = true
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.ptr().next = 0
				if size > 2*sys.PtrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += size
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan. Nothing to do.
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				c.local_scan += dataSize - typ.size + typ.ptrdata
			}
		} else {
			c.local_scan += typ.ptrdata
		}

		// Ensure that the stores above that initialize x to
		// type-safe memory and set the heap bits occur before
		// the caller can make x observable to the garbage
		// collector. Otherwise, on weakly ordered machines,
		// the garbage collector could follow a pointer to x,
		// but see uninitialized memory or stale heap bits.
		publicationBarrier()
	}

	// GC mark termination allocates black;
	// all slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination || gcBlackenPromptly {
		systemstack(func() {
			gcmarknewobject_m(uintptr(x), size)
		})
	}

	if raceenabled {
		racemalloc(x, size)
	}
	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
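		// (size has been rounded up to the size that was actually
		// allocated, so the debt grows here by the difference between
		// it and dataSize.)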
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, flag uint32) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
	if s == nil {
		throw("out of memory")
	}
	s.limit = uintptr(s.start)<<_PageShift + size
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
	return mallocgc(uintptr(typ.size)*n, typ, flags)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
	return newarray(typ, n)
}

// rawmem returns a chunk of pointerless memory. It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * period, approximately 20 * period.
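	// (With randomBitCount = 26 below, the exact bound is 26*ln(2),
	// about 18, times period, so capping period at 0x7000000 keeps
	// the result just under the int32 limit.)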
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := uint32(fastrand1())%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand1()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}