github.com/AESNooper/go/src@v0.0.0-20220218095104-b56a4ab1bbbb/runtime/mpagealloc.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the process's address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for systems with large address spaces, so in these cases they are mapped
// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design only scales up to a bounded heap size, but it can always be
// extended to larger heaps by simply adding levels to the radix tree, which mostly
// costs additional virtual address space. The choice of managing large arrays also
// means that a large amount of virtual address space may be reserved by the runtime.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

const (
	// The size of a bitmap chunk, i.e. the number of bits (that is, pages) to consider
	// in the bitmap at once.
	pallocChunkPages    = 1 << logPallocChunkPages
	pallocChunkBytes    = pallocChunkPages * pageSize
	logPallocChunkPages = 9
	logPallocChunkBytes = logPallocChunkPages + pageShift
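
	// As a worked example (a sketch, assuming pageShift = 13, i.e. 8 KiB pages,
	// as on most 64-bit platforms; pageShift is defined elsewhere in the runtime):
	//
	//	pallocChunkPages    = 1 << 9      = 512 pages
	//	pallocChunkBytes    = 512 * 8 KiB = 4 MiB
	//	logPallocChunkBytes = 9 + 13      = 22
	//
	// So each bitmap chunk (and each leaf summary) covers 4 MiB of address space.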
	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation explains how these constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits

	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits
)

// Maximum searchAddr value, which indicates that the heap has no free space.
//
// We alias maxOffAddr just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
var maxSearchAddr = maxOffAddr

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint

// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
}

// chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}

// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}

// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> pallocChunksL1Shift
	}
}

// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<pallocChunksL2Bits - 1)
	}
}

// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a - arenaBaseOffset) >> levelShift[level])
}

// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
}
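
// To make the level constants concrete, here is a sketch for a 48-bit address
// space with 4 MiB chunks (heapAddrBits = 48, summaryLevels = 5; these values
// are assumptions plugged into the equation above, not definitions):
//
//	summaryL0Bits = 48 - 22 - 4*3 = 14
//
// so summary[0] has 1<<14 entries, each covering 1<<(22+4*3) bytes = 16 GiB,
// and each deeper level splits an entry into 1<<summaryLevelBits = 8 children:
// 16 GiB -> 2 GiB -> 256 MiB -> 32 MiB -> 4 MiB (one chunk). Under those same
// assumptions levelShift[0] = 34, so offAddrToLevelIndex(0, addr) is just
// (addr - arenaBaseOffset) >> 34.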

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
	// This is slightly more nuanced than just a shift for the exclusive
	// upper-bound. Note that the exclusive upper bound may be within a
	// summary at this level, meaning if we just do the obvious computation
	// hi will end up being an inclusive upper bound. Unfortunately, just
	// adding 1 to that is too broad since we might be on the very edge
	// of a summary's max page count boundary for this level
	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
	// then shift, then add 1, so we get an exclusive upper bound at the end.
	lo = int((base - arenaBaseOffset) >> levelShift[level])
	hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
	return
}

// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	e := uintptr(1) << levelBits[level]
	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
}

type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory which is only Reserved, which may result in a hard fault.
	//
	// We may still get segmentation faults < len since some of that
	// memory may not be committed yet.
	summary [summaryLevels][]pallocSum

	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	//	chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit: the
	// address space is small, so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which is a good balance between low granularity without
	// making the impact on BSS too high (note the L1 is stored directly
	// in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// Protected by mheapLock.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
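
	// As a sketch of the 48-bit row above (assuming 4 MiB chunks and a
	// 128-byte pallocData, i.e. two 512-bit bitmaps): the chunk index is
	// 48-22 = 26 bits wide, split 13/13, so each L2 array is
	// (1<<13)*128 B = 1 MiB and the L1 array here is 1<<13 pointers.
	// A chunk at index ci then lives at chunks[ci.l1()][ci.l2()].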

	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr.addr()) must always be true. The one
	// exception to this rule is that it may take on the value of
	// maxOffAddr to indicate that the heap is exhausted.
	//
	// We guarantee that all valid heap addresses below this value
	// are allocated and not worth searching.
	searchAddr offAddr

	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx

	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// This field is currently unused on 32-bit architectures but
	// is harmless to track. We care much more about having a
	// contiguous heap in these cases and take additional measures
	// to ensure that, so in nearly all cases this should have just
	// 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges

	// scav stores the scavenger state.
	scav struct {
		lock mutex

		// inUse is a slice of ranges of address space which have not
		// yet been looked at by the scavenger.
		//
		// Protected by lock.
		inUse addrRanges

		// gen is the scavenge generation number.
		//
		// Protected by lock.
		gen uint32

		// reservationBytes is how large of a reservation should be made
		// in bytes of address space for each scavenge iteration.
		//
		// Protected by lock.
		reservationBytes uintptr

		// released is the amount of memory released this generation.
		//
		// Updated atomically.
		released uintptr

		// scavLWM is the lowest (offset) address that the scavenger reached this
		// scavenge generation.
		//
		// Protected by lock.
		scavLWM offAddr

		// freeHWM is the highest (offset) address of a page that was freed to
		// the page allocator this scavenge generation.
		//
		// Protected by mheapLock.
		freeHWM offAddr
	}

	// mheapLock is a pointer to mheap_.lock. This level of indirection
	// makes it possible to test pageAlloc independently of the runtime
	// allocator.
	mheapLock *mutex

	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *sysMemStat

	// Whether or not this struct is being used in tests.
	test bool
}
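
// A minimal lifecycle sketch (hypothetical, for orientation only; in the real
// runtime these calls are made by mheap with the heap lock held, and
// testLock/testStat stand in for mheap_.lock and a sysMemStat):
//
//	var p pageAlloc
//	p.init(testLock, testStat)   // reserve the summary levels
//	p.grow(base, 4<<20)          // map metadata for [base, base+4MiB)
//	addr, scav := p.alloc(1)     // address-ordered first-fit search
//	p.free(addr, 1, false)       // return the page to the heap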

func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	p.sysStat = sysStat

	// Initialize p.inUse.
	p.inUse.init(sysStat)

	// System-dependent initialization.
	p.sysInit()

	// Start with the searchAddr in a state indicating there's no free memory.
	p.searchAddr = maxSearchAddr

	// Set the mheapLock.
	p.mheapLock = mheapLock

	// Initialize scavenge tracking state.
	p.scav.scavLWM = maxSearchAddr
}

// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return &l2[ci.l2()]
}

// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &p.chunks[ci.l1()][ci.l2()]
}

// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *p.sysStat will be updated.
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
	assertLockHeld(p.mheapLock)

	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	p.sysGrow(base, limit)

	// Update p.start and p.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := p.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < p.start {
		p.start = start
	}
	if end > p.end {
		p.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	p.inUse.add(makeAddrRange(base, limit))

	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below p.searchAddr, update p.searchAddr to the
	// new address, just like in free.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}

	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if p.chunks[c.l1()] == nil {
			// Create the necessary L2 entry.
			//
			// Store it atomically to avoid races with readers which
			// don't acquire the heap lock.
			r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
			if r == nil {
				throw("pageAlloc: out of memory")
			}
			atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
		}
		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}

	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries.
	p.update(base, size/pageSize, true, false)
}
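
// For instance (a sketch, again assuming 4 MiB chunks and ignoring
// arenaBaseOffset): a call like grow(0x401000, 0x2000) covers only part of a
// chunk, so base is aligned down to 0x400000 and limit up to 0x800000, and
// that whole chunk's bitmap is initialized with every page free and its
// scavenged bits set.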

// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between base and base+npages*pageSize.
// alloc indicates whether the operation performed was an allocation or a free.
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	assertLockHeld(p.mheapLock)

	// base, limit, start, and end are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)

	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := p.summary[len(p.summary)-1][sc]
		y := p.chunkOf(sc).summarize()
		if x == y {
			return
		}
		p.summary[len(p.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := p.summary[len(p.summary)-1]

		// Update the summary for chunk sc.
		summary[sc] = p.chunkOf(sc).summarize()

		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		whole := p.summary[len(p.summary)-1][sc+1 : ec]
		if alloc {
			// Should optimize into a memclr.
			for i := range whole {
				whole[i] = 0
			}
		} else {
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}

		// Update the summary for chunk ec.
		summary[ec] = p.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := p.summary[len(p.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = p.chunkOf(c).summarize()
		}
	}

	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false

		// "Constants" for the previous level which we
		// need to compute the summary from that level.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]

		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)

		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := p.summary[l][i]
			if old != sum {
				changed = true
				p.summary[l][i] = sum
			}
		}
	}
}
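
// A worked example of the upward walk (a sketch, assuming 512-page chunks and
// 8 children per summary): freeing two whole adjacent chunks sets both leaf
// summaries to freeChunkSum = (512, 512, 512). If the other six children of
// their parent block are also completely free, mergeSummaries yields
// (4096, 4096, 4096) for the parent, i.e. all 8*512 pages free; and if nothing
// changed at some level, the walk stops early via the changed flag.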

// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
	assertLockHeld(p.mheapLock)

	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
		}
		chunk = p.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
	}
	p.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}
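
// For example (a sketch with 512-page chunks): allocRange(base, 600) where
// base is 100 pages into chunk sc marks pages [100, 512) of sc and then pages
// [0, 188) of chunk sc+1, and returns pageSize times the number of those
// pages whose scavenged bits were set, so the caller can account for how much
// of the returned memory had previously been released to the OS.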

// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
	assertLockHeld(p.mheapLock)

	// If we're not in a test, validate first by checking mheap_.arenas.
	// This is a fast path which is only safe to use outside of testing.
	ai := arenaIndex(addr.addr())
	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
		vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
		if ok {
			return offAddr{vAddr}
		} else {
			// The candidate search address is greater than any
			// known address, which means we definitely have no
			// free memory left.
			return maxOffAddr
		}
	}
	return addr
}

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses p.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(p.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate p.searchAddr, which may or
// may not prune more of the address space than p.searchAddr already does.
// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
	assertLockHeld(p.mheapLock)

	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] of entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	//  1) That a given subtree contains a large enough contiguous region, at
	//     which point it continues iterating on the next level, or
	//  2) That there are enough contiguous boundary-crossing bits to satisfy
	//     the allocation, at which point it knows exactly where to start
	//     allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space in which we are certain to
	// find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base.addr() is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound offAddr
	}{
		base:  minOffAddr,
		bound: maxOffAddr,
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr.add(size - 1)
		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
			throw("range partially overlaps")
		}
	}
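
	// For example (a sketch): if the root scan first sees free pages in some
	// 16 GiB entry, firstFree narrows to that entry's window; descending, a
	// free 2 GiB child narrows it further. If the search later has to move
	// past that child to assemble a large enough run, the newly visited
	// ranges no longer nest inside the window, foundFree leaves firstFree
	// alone, and firstFree.base remains the best lower bound on the first
	// free page in the heap.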

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(p.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := p.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
		j0 := 0
		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A full entry means we broke any streak and
				// that we should skip it altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

			s := sum.start()
			if size+s >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += s
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || s < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
			return addr, p.findMappedAddr(firstFree.base)
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr
		}
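
		// To illustrate the streak logic above (a sketch with 512-page leaf
		// entries): searching for npages = 600, an entry with end() = 300
		// followed by an entry with start() = 320 gives size+s = 620 >= 600,
		// a run straddling the boundary at base = (j+1)*512 - 300, even
		// though neither entry alone had max() >= 600.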
769 print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n") 770 print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n") 771 print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n") 772 print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n") 773 for j := 0; j < len(entries); j++ { 774 sum := entries[j] 775 print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n") 776 } 777 throw("bad summary data") 778 } 779 780 // Since we've gotten to this point, that means we haven't found a 781 // sufficiently-sized free region straddling some boundary (chunk or larger). 782 // This means the last summary we inspected must have had a large enough "max" 783 // value, so look inside the chunk to find a suitable run. 784 // 785 // After iterating over all levels, i must contain a chunk index which 786 // is what the final level represents. 787 ci := chunkIdx(i) 788 j, searchIdx := p.chunkOf(ci).find(npages, 0) 789 if j == ^uint(0) { 790 // We couldn't find any space in this chunk despite the summaries telling 791 // us it should be there. There's likely a bug, so dump some state and throw. 792 sum := p.summary[len(p.summary)-1][i] 793 print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n") 794 print("runtime: npages = ", npages, "\n") 795 throw("bad summary data") 796 } 797 798 // Compute the address at which the free space starts. 799 addr := chunkBase(ci) + uintptr(j)*pageSize 800 801 // Since we actually searched the chunk, we may have 802 // found an even narrower free window. 803 searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize 804 foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr) 805 return addr, p.findMappedAddr(firstFree.base) 806 } 807 808 // alloc allocates npages worth of memory from the page heap, returning the base 809 // address for the allocation and the amount of scavenged memory in bytes 810 // contained in the region [base address, base address + npages*pageSize). 811 // 812 // Returns a 0 base address on failure, in which case other returned values 813 // should be ignored. 814 // 815 // p.mheapLock must be held. 816 // 817 // Must run on the system stack because p.mheapLock must be held. 818 // 819 //go:systemstack 820 func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) { 821 assertLockHeld(p.mheapLock) 822 823 // If the searchAddr refers to a region which has a higher address than 824 // any known chunk, then we know we're out of memory. 825 if chunkIndex(p.searchAddr.addr()) >= p.end { 826 return 0, 0 827 } 828 829 // If npages has a chance of fitting in the chunk where the searchAddr is, 830 // search it directly. 831 searchAddr := minOffAddr 832 if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) { 833 // npages is guaranteed to be no greater than pallocChunkPages here. 

// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := minOffAddr
	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(p.searchAddr.addr())
		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
			if j == ^uint(0) {
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = p.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			p.searchAddr = maxSearchAddr
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = p.allocRange(addr, npages)

	// If we found a higher searchAddr, we know that all the
	// heap memory before that searchAddr in an offset address space is
	// allocated, so bump p.searchAddr up to the new one.
	if p.searchAddr.lessThan(searchAddr) {
		p.searchAddr = searchAddr
	}
	return addr, scav
}

// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) free(base, npages uintptr, scavenged bool) {
	assertLockHeld(p.mheapLock)

	// If we're freeing pages below the p.searchAddr, update searchAddr.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	limit := base + npages*pageSize - 1
	if !scavenged {
		// Update the free high watermark for the scavenger.
		if offLimit := (offAddr{limit}); p.scav.freeHWM.lessThan(offLimit) {
			p.scav.freeHWM = offLimit
		}
	}
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		p.chunkOf(i).free1(chunkPageIndex(base))
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			p.chunkOf(sc).free(si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			p.chunkOf(sc).free(si, pallocChunkPages-si)
			for c := sc + 1; c < ec; c++ {
				p.chunkOf(c).freeAll()
			}
			p.chunkOf(ec).free(0, ei+1)
		}
	}
	p.update(base, npages, true, false)
}

const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)

// pallocSum is a packed summary type which packs three numbers: start, max,
// and end into a single 8-byte value. Each of these values is a count derived
// from a bitmap, and each may have a maximum value of 2^21 - 1, or all three
// may be equal to 2^21. The latter case is represented by just setting the
// 64th bit.
type pallocSum uint64

// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}

// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
}

// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}
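
// A round-trip sketch (assuming logMaxPackedValue = 21, i.e. the 64-bit
// configuration): packPallocSum(5, 100, 3) stores 5 in bits 0-20, 100 in
// bits 21-41, and 3 in bits 42-62, so start(), max(), and end() recover
// (5, 100, 3). packPallocSum(x, maxPackedValue, y) collapses to just bit 63,
// and all three accessors then report maxPackedValue.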

// mergeSummaries merges consecutive summaries, each of which may represent
// at most 1 << logMaxPagesPerSum pages, into one.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, max, and end.
	start, max, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free, otherwise this summary's start
		// plays no role in the combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// and at the max of sums[i], taking the greatest of those two
		// and the max of the running sum.
		if end+si > max {
			max = end + si
		}
		if mi > max {
			max = mi
		}

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, max, end)
}
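
// A worked example of the merge (a sketch with logMaxPagesPerSum = 9, so each
// child covers 512 pages): merging a completely free child (512, 512, 512)
// with a child (100, 200, 50) gives start = 512+100 = 612 (the free prefix
// continues across the boundary), max = max(512, 512+100, 200) = 612, and
// end = 50, since the second child is not fully free.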