// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mSpanList                // free lists length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
	allspans  **mspan                  // all spans out there
	gcspans   **mspan                  // copy of allspans referenced by gc marker or sweeper
	nspan     uint32
	sweepgen  uint32 // sweep generation, see comment in mspan
	sweepdone uint32 // all spans are swept
	// span lookup
	spans        **mspan
	spans_mapped uintptr

	// Proportional sweep
	pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
	pagesSwept        uint64  // pages swept this cycle; updated atomically
	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap         uintptr // Points to one byte past the end of the bitmap
	bitmap_mapped  uintptr
	arena_start    uintptr
	arena_used     uintptr // always mHeap_Map{Bits,Spans} before updating
	arena_end      uintptr
	arena_reserved bool

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	central [_NumSizeClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.
}

var mheap_ mheap
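
// Illustrative sketch, not part of the original source: a span's page count
// picks its list. Spans shorter than _MaxMHeapList pages sit in the
// exact-size list free[npages]; longer spans all share freelarge. The
// freeList helper later in this file implements the same selection.
func exampleFreeListFor(h *mheap, npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages] // exact-size list for short spans
	}
	return &h.freelarge // shared list for npages >= _MaxMHeapList
}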

// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or stack to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   stack or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
const (
	_MSpanInUse = iota // allocated for garbage collected heap
	_MSpanStack        // allocated for use by stack allocator
	_MSpanFree
	_MSpanDead
)

// mSpanList heads a linked list of spans.
//
// Linked list structure is based on BSD's "tail queue" data structure.
type mSpanList struct {
	first *mspan  // first span in list, or nil if none
	last  **mspan // last span's next field, or first if none
}

type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev **mspan    // previous span's next field, or list head's first field if none
	list *mSpanList // For debugging. TODO: Remove.

	startAddr     uintptr   // address of first byte of span aka s.base()
	npages        uintptr   // number of pages in span
	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans
	// begin just past the newly discovered free object.
	//
	// If freeindex == nelems, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelems are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uintptr
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uintptr // number of objects in the span.

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64
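
	// Illustrative worked example, not part of the original source:
	// suppose freeindex == 3 and the first allocBits byte is 0b00011111
	// (objects 0-4 allocated, 5-7 free). Shifting right by freeindex
	// leaves 0b00000011 for objects 3 and up; allocCache holds the
	// complement ...11111100, so ctz(allocCache) == 2 and the next free
	// object is index 3+2 == 5.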

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are four arenas where this data is held:
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *uint8
	gcmarkBits *uint8

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint32   // for divide by elemsize - divMagic.mul
	allocCount  uint16   // capacity - number of objects in freelist
	sizeclass   uint8    // size class
	incache     bool     // being used by an mcache
	state       uint8    // mspaninuse etc
	needzero    uint8    // needs to be zeroed before allocation
	divShift    uint8    // for divide by elemsize - divMagic.shift
	divShift2   uint8    // for divide by elemsize - divMagic.shift2
	elemsize    uintptr  // computed from sizeclass or from npages
	unusedsince int64    // first time spotted by gc in mspanfree state
	npreleased  uintptr  // number of pages released to the os
	limit       uintptr  // end of data in span
	speciallock mutex    // guards specials list
	specials    *special // linked list of special records sorted by offset.
	baseMask    uintptr  // if non-0, elemsize is a power of 2, & this will get object allocation base
}

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go

// h_spans is a lookup table to map virtual address page IDs to *mspan.
// For allocated spans, their pages map to the span itself.
// For free spans, only the lowest and highest pages map to the span itself. Internal
// pages map to an arbitrary span.
// For pages that have never been allocated, h_spans entries are nil.
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h_allspans) >= cap(h_allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h_allspans)*3/2 {
			n = cap(h_allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h_allspans)
		sp.cap = n
		if len(h_allspans) > 0 {
			copy(new, h_allspans)
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc.go.
			if h.allspans != mheap_.gcspans {
				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
			}
		}
		h_allspans = new
		h.allspans = (**mspan)(sp.array)
	}
	h_allspans = append(h_allspans, s)
	h.nspan = uint32(len(h_allspans))
}
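
// Illustrative sketch, not part of the original source: h_spans is indexed
// by page number relative to arena_start, so mapping any heap pointer to its
// page ID is a subtract and a shift; spanOf and spanOfUnchecked below
// open-code the same arithmetic.
func examplePageIndex(p uintptr) uintptr {
	// h_spans[examplePageIndex(p)] is the span covering p's page.
	return (p - mheap_.arena_start) >> _PageShift
}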

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// inHeapOrStack is a variant of inheap that returns true for pointers into stack spans.
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() {
		return false
	}
	switch s.state {
	case mSpanInUse:
		return b < s.limit
	case _MSpanStack:
		return b < s.base()+s.npages<<_PageShift
	default:
		return false
	}
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return h_spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := s.base()
	if s.sizeclass == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (v - p) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}
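
// Illustrative sketch, not part of the original source: the interior-pointer
// arithmetic used by mlookup above. Given span base p and element size n, an
// interior pointer v falls in object i = (v-p)/n, whose base is p + i*n.
func exampleObjectBase(p, n, v uintptr) uintptr {
	i := (v - p) / n // index of the object containing v
	return p + i*n   // base address of that object
}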

// Initialize the heap.
func (h *mheap) init(spans_size uintptr) {
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.freelarge.init()
	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(int32(i))
	}

	sp := (*slice)(unsafe.Pointer(&h_spans))
	sp.array = unsafe.Pointer(h.spans)
	sp.len = int(spans_size / sys.PtrSize)
	sp.cap = int(spans_size / sys.PtrSize)
}

// mHeap_MapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, sys.PhysPageSize)
	if h.spans_mapped >= n {
		return
	}
	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
	h.spans_mapped = n
}

// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s)
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
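	// Illustrative note, not part of the original source: sweepone (below)
	// sweeps one span and returns the number of pages it frees, or
	// ^uintptr(0) once no unswept spans remain, so this loop terminates.
	// The heap lock is dropped first because sweeping may itself need to
	// lock the heap (for example, to return a span via freeSpanLocked).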
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		h.reclaim(npage)
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		s.state = _MSpanInUse
		s.allocCount = 0
		s.sizeclass = uint8(sizeclass)
		if sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.free)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h_spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h_spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h_spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h_spans orders the reads.
	unlock(&h.lock)
	return s
}
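
// Illustrative sketch, not part of the original source: the baseMask set up
// in alloc_m above serves power-of-two element sizes. Assuming baseMask ==
// ^(elemsize-1) (per the baseMask field comment on mspan), rounding an
// interior pointer down to its object base needs no division at all:
func exampleBaseOfPow2(s *mspan, p uintptr) uintptr {
	// Round the offset within the span down to a multiple of elemsize.
	return s.base() + (p-s.base())&s.baseMask
}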

func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, sizeclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

func (h *mheap) allocStack(npage uintptr) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_allocstack not on g0 stack")
	}
	lock(&h.lock)
	s := h.allocSpanLocked(npage)
	if s != nil {
		s.state = _MSpanStack
		s.stackfreelist = 0
		s.allocCount = 0
		memstats.stacks_inuse += uint64(s.npages << _PageShift)
	}

	// This unlock acts as a release barrier. See mHeap_Alloc_m.
	unlock(&h.lock)
	return s
}

// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			goto HaveSpan
		}
	}

	// Best fit in list of large spans.
	list = &h.freelarge
	s = h.allocLarge(npage)
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	list.remove(s)
	if s.inList() {
		throw("still in list")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.base()+npage<<_PageShift, s.npages-npage)
		s.npages = npage
		p := (t.base() - h.arena_start) >> _PageShift
		if p > 0 {
			h_spans[p-1] = s
		}
		h_spans[p] = t
		h_spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanStack // prevent coalescing with s
		t.state = _MSpanStack
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := (s.base() - h.arena_start) >> _PageShift
	for n := uintptr(0); n < npage; n++ {
		h_spans[p+n] = s
	}

	memstats.heap_inuse += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}

// Allocate a span of at least npage pages from the list of large spans.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	return bestFit(&h.freelarge, npage, nil)
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
	for s := list.first; s != nil; s = s.next {
		if s.npages < npage {
			continue
		}
		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.base() < best.base()) {
			best = s
		}
	}
	return best
}
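
// Illustrative sketch, not part of the original source: when allocSpanLocked
// above finds a span with more pages than requested, it trims the tail into
// a second span. The bounds of the split work out as follows:
func exampleSplitBounds(base, oldNpages, npage uintptr) (tBase, tNpages uintptr) {
	tBase = base + npage<<_PageShift // the tail starts after the npage kept pages
	tNpages = oldNpages - npage      // the tail keeps the leftover page count
	return
}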

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(uintptr(v), ask>>_PageShift)
	p := (s.base() - h.arena_start) >> _PageShift
	for i := p; i < p+s.npages; i++ {
		h_spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h_spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	s := h_spans[(uintptr(v)-h.arena_start)>>_PageShift]
	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

func (h *mheap) freeStack(s *mspan) {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_freestack not on g0 stack")
	}
	s.needzero = 1
	lock(&h.lock)
	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, true, true, 0)
	unlock(&h.lock)
}
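
// Illustrative sketch, not part of the original source: the sizing policy of
// grow above, restated. With 8KB pages, a 3-page request first rounds up to
// round(3, (64<<10)/8192) = 8 pages (64kB), and the ask is then raised to
// _HeapAllocChunk if that is still larger.
func exampleGrowAsk(npage uintptr) uintptr {
	npage = round(npage, (64<<10)/_PageSize) // whole 64kB units of pages
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk // never ask the OS for less than one chunk
	}
	return ask
}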

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanStack:
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := (s.base() - h.arena_start) >> _PageShift
	if p > 0 {
		t := h_spans[p-1]
		if t != nil && t.state == _MSpanFree {
			s.startAddr = t.startAddr
			s.npages += t.npages
			s.npreleased = t.npreleased // absorb released pages
			s.needzero |= t.needzero
			p -= t.npages
			h_spans[p] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}
	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
		t := h_spans[p+s.npages]
		if t != nil && t.state == _MSpanFree {
			s.npages += t.npages
			s.npreleased += t.npreleased
			s.needzero |= t.needzero
			h_spans[p+s.npages-1] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}

	// Insert s into appropriate list.
	h.freeList(s.npages).insert(s)
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages]
	}
	return &h.freelarge
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}

func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
			start := s.base()
			end := start + s.npages<<_PageShift
			if sys.PhysPageSize > _PageSize {
				// We can only release pages in
				// PhysPageSize blocks, so round start
				// and end in. (Otherwise, madvise
				// will round them *out* and release
				// more memory than we want.)
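				// Illustrative note, not part of the original
				// source: with 64KB physical pages and 8KB heap
				// pages, a span covering [0x1f000, 0x3b000)
				// rounds in to [0x20000, 0x30000), so only one
				// physical page is released.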
				start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
				end &^= sys.PhysPageSize - 1
				if start == end {
					continue
				}
			}
			len := end - start

			released := len - (s.npreleased << _PageShift)
			if sys.PhysPageSize > _PageSize && released == 0 {
				continue
			}
			memstats.heap_released += uint64(released)
			sumreleased += released
			s.npreleased = len >> _PageShift
			sysUnused(unsafe.Pointer(start), len)
		}
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengelist(&h.freelarge, now, limit)
	unlock(&h.lock)

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
		// But we can't call ReadMemStats on g0 holding locks.
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gcStart(gcForceBlockMode, false)
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.sizeclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
}

func (span *mspan) inList() bool {
	return span.prev != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = &list.first
}

func (list *mSpanList) remove(span *mspan) {
	if span.prev == nil || span.list != list {
		println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
		throw("MSpanList_Remove")
	}
	if span.next != nil {
		span.next.prev = span.prev
	} else {
		// TODO: After we remove the span.list != list check above,
		// we could at least still check list.last == &span.next here.
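		// Illustrative note, not part of the original source: span was
		// the list's last element, so the tail pointer must back up to
		// the predecessor's next field, which is exactly span.prev.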
		list.last = span.prev
	}
	*span.prev = span.next
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		list.first.prev = &span.next
	} else {
		list.last = &span.next
	}
	list.first = span
	span.prev = &list.first
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.next = nil
	span.prev = list.last
	*list.last = span
	list.last = &span.next
	span.list = list
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
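	// Illustrative note, not part of the original source: acquirem also
	// disables preemption, which ensureSwept requires; otherwise the span
	// could be swept again (rewriting the specials list) while it is
	// walked below.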
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
type specialfinalizer struct {
	special special
	fn      *funcval
	nret    uintptr
	fint    *_type
	ot      *ptrtype
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}
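
// Illustrative sketch, not part of the original source: the specials list is
// kept sorted by (offset, kind), the ordering the splice loop in addspecial
// above relies on. Since _KindSpecialFinalizer < _KindSpecialProfile, a
// finalizer special always precedes a profile special for the same object.
func exampleSpecialLess(aOffset uintptr, aKind byte, bOffset uintptr, bKind byte) bool {
	return aOffset < bOffset || (aOffset == bOffset && aKind < bKind)
}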

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

type gcBits struct {
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte.
	next *gcBits
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBits
	next     *gcBits
	current  *gcBits
	previous *gcBits
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *uint8 {
	lock(&gcBitsArenas.lock)
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8
	if gcBitsArenas.next == nil ||
		gcBitsArenas.next.free+bytesNeeded > uintptr(len(gcBits{}.bits)) {
		// Allocate a new arena.
		fresh := newArena()
		fresh.next = gcBitsArenas.next
		gcBitsArenas.next = fresh
	}
	if gcBitsArenas.next.free >= gcBitsChunkBytes {
		println("runtime: gcBitsArenas.next.free=", gcBitsArenas.next.free, gcBitsChunkBytes)
		throw("markBits overflow")
	}
	result := &gcBitsArenas.next.bits[gcBitsArenas.next.free]
	gcBitsArenas.next.free += bytesNeeded
	unlock(&gcBitsArenas.lock)
	return result
}

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans that are not being initialized,
// the mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *uint8 {
	return newMarkBits(nelems)
}
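
// Illustrative sketch, not part of the original source: the size arithmetic
// used by newMarkBits above. Bit counts round up to whole 64-bit words, so a
// span with 100 objects consumes ((100+63)/64)*8 = 16 bytes of gcBits arena,
// and every handout stays 8-byte aligned.
func exampleMarkBitsBytes(nelems uintptr) uintptr {
	blocks := (nelems + 63) / 64 // 64-bit words needed to cover nelems bits
	return blocks * 8            // bytes; always a multiple of 8
}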

// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle which is demarcated by the call to finishsweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
// where the GC will mark objects and after each span is swept these bits
// will be used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
// gcAllocBits live until all the spans have been swept during this GC cycle.
// The span's sweep extinguishes all the references to gcBitsArenas.previous
// by pointing gcAllocBits into the gcBitsArenas.current.
// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	gcBitsArenas.next = nil // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}

// newArena allocates and zeroes a gcBits arena.
func newArena() *gcBits {
	var result *gcBits
	if gcBitsArenas.free == nil {
		result = (*gcBits)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclr(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if uintptr(unsafe.Offsetof(gcBits{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}
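
// Illustrative note, not part of the original source: the epoch rotation in
// nextMarkBitArenaEpoch treats the four arena lists as a pipeline. After one
// rotation, the bits spans populated via "next" during the last cycle serve
// as this cycle's "current" mark bits:
//
//	free <- previous <- current <- next <- (nil; newArena refills lazily)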