github.com/robhaswell/grandperspective-scan@v0.1.0/test/go-go1.7.1/src/runtime/mheap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// minPhysPageSize is a lower-bound on the physical page size. The
// true physical page size may be larger than this. In contrast,
// sys.PhysPageSize is an upper-bound on the physical page size.
const minPhysPageSize = 4096

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mSpanList                // free lists length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
	allspans  **mspan                  // all spans out there
	gcspans   **mspan                  // copy of allspans referenced by gc marker or sweeper
	nspan     uint32
	sweepgen  uint32 // sweep generation, see comment in mspan
	sweepdone uint32 // all spans are swept
	// span lookup
	spans        **mspan
	spans_mapped uintptr

	// Proportional sweep
	pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
	pagesSwept        uint64  // pages swept this cycle; updated atomically
	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.

	// Malloc stats.
	largefree  uint64                  // bytes freed for large objects (>maxsmallsize)
	nlargefree uint64                  // number of frees for large objects (>maxsmallsize)
	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	bitmap         uintptr // Points to one byte past the end of the bitmap
	bitmap_mapped  uintptr
	arena_start    uintptr
	arena_used     uintptr // always mHeap_Map{Bits,Spans} before updating
	arena_end      uintptr
	arena_reserved bool

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	central [_NumSizeClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators.
}

var mheap_ mheap
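/*
   Illustrative sketch (not part of the original source): how a span's page
   count selects a free list. Spans with fewer than _MaxMHeapList pages live
   in the fixed-size lists indexed by page count; everything larger goes to
   freelarge (and, when allocated, busylarge). The real helpers are
   h.freeList and h.busyList, defined later in this file.

	func freeListFor(h *mheap, npages uintptr) *mSpanList {
		if npages < uintptr(len(h.free)) {
			return &h.free[npages] // exact-size list
		}
		return &h.freelarge // all larger spans share one list
	}
*/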
// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.

// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or stack to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   stack or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
const (
	_MSpanInUse = iota // allocated for garbage collected heap
	_MSpanStack        // allocated for use by stack allocator
	_MSpanFree
	_MSpanDead
)

// mSpanList heads a linked list of spans.
//
// Linked list structure is based on BSD's "tail queue" data structure.
type mSpanList struct {
	first *mspan  // first span in list, or nil if none
	last  **mspan // last span's next field, or first if none
}
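/*
   Illustrative sketch (not part of the original source): the "tail queue"
   invariants maintained by insert, insertBack, and remove (defined later in
   this file). list.last always points at the next field of the final span,
   or at list.first when the list is empty, so appending is O(1):

	var list mSpanList
	list.init() // first = nil, last = &list.first
	// after list.insertBack(s): *list.last == s and list.last == &s.next
*/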
type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev **mspan    // previous span's next field, or list head's first field if none
	list *mSpanList // For debugging. TODO: Remove.

	startAddr     uintptr   // address of first byte of span aka s.base()
	npages        uintptr   // number of pages in span
	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelem, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelem are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uintptr
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uintptr // number of objects in the span.

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are three arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *uint8
	gcmarkBits *uint8

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	divMul      uint32   // for divide by elemsize - divMagic.mul
	allocCount  uint16   // capacity - number of objects in freelist
	sizeclass   uint8    // size class
	incache     bool     // being used by an mcache
	state       uint8    // mspaninuse etc
	needzero    uint8    // needs to be zeroed before allocation
	divShift    uint8    // for divide by elemsize - divMagic.shift
	divShift2   uint8    // for divide by elemsize - divMagic.shift2
	elemsize    uintptr  // computed from sizeclass or from npages
	unusedsince int64    // first time spotted by gc in mspanfree state
	npreleased  uintptr  // number of pages released to the os
	limit       uintptr  // end of data in span
	speciallock mutex    // guards specials list
	specials    *special // linked list of special records sorted by offset.
	baseMask    uintptr  // if non-0, elemsize is a power of 2, & this will get object allocation base
}

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go

// h_spans is a lookup table to map virtual address page IDs to *mspan.
// For allocated spans, their pages map to the span itself.
// For free spans, only the lowest and highest pages map to the span itself. Internal
// pages map to an arbitrary span.
// For pages that have never been allocated, h_spans entries are nil.
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go

func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h_allspans) >= cap(h_allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h_allspans)*3/2 {
			n = cap(h_allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h_allspans)
		sp.cap = n
		if len(h_allspans) > 0 {
			copy(new, h_allspans)
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc.go.
			if h.allspans != mheap_.gcspans {
				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
			}
		}
		h_allspans = new
		h.allspans = (**mspan)(sp.array)
	}
	h_allspans = append(h_allspans, s)
	h.nspan = uint32(len(h_allspans))
}
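/*
   Illustrative sketch (not part of the original source): how a heap address
   is mapped to its span via h_spans. The page ID is just the offset from
   arena_start shifted down by the page size; spanOfUnchecked below does
   exactly this.

	func spanForAddr(p uintptr) *mspan {
		idx := (p - mheap_.arena_start) >> _PageShift // page ID within the arena
		return h_spans[idx]                           // nil if the page was never allocated
	}
*/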
// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
		return false
	}
	return true
}

// inHeapOrStack is a variant of inheap that returns true for pointers into stack spans.
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() {
		return false
	}
	switch s.state {
	case mSpanInUse:
		return b < s.limit
	case _MSpanStack:
		return b < s.base()+s.npages<<_PageShift
	default:
		return false
	}
}

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return h_spans[(p-mheap_.arena_start)>>_PageShift]
}

func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := s.base()
	if s.sizeclass == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (v - p) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}
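/*
   Illustrative sketch (not part of the original source): what mlookup
   computes for a pointer into a small-object span. The object's base is the
   span offset rounded down to a multiple of elemsize.

	v := uintptr(0) // some interior pointer (hypothetical)
	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if s != nil && s.sizeclass != 0 {
		i := (v - s.base()) / s.elemsize // object index within the span
		base := s.base() + i*s.elemsize  // start of the enclosing object
		_ = base
	}
*/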
// Initialize the heap.
func (h *mheap) init(spans_size uintptr) {
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// h->mapcache needs no init
	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.freelarge.init()
	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(int32(i))
	}

	sp := (*slice)(unsafe.Pointer(&h_spans))
	sp.array = unsafe.Pointer(h.spans)
	sp.len = int(spans_size / sys.PtrSize)
	sp.cap = int(spans_size / sys.PtrSize)
}

// mHeap_MapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, sys.PhysPageSize)
	if h.spans_mapped >= n {
		return
	}
	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
	h.spans_mapped = n
}

// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s)
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}
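/*
   Illustrative sketch (not part of the original source): the sweepgen
   handshake used by reclaimList, following the sweep-generation comment in
   mspan above. With sg = mheap_.sweepgen:

	switch s.sweepgen {
	case sg - 2: // span needs sweeping; CAS it to sg-1 to claim it
	case sg - 1: // span is currently being swept by someone else
	case sg:     // span is swept and ready to use
	}

   reclaimList claims unswept spans with atomic.Cas(&s.sweepgen, sg-2, sg-1)
   and sweeps them itself, dropping h.lock around the sweep.
*/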
// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.
		h.reclaim(npage)
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		s.state = _MSpanInUse
		s.allocCount = 0
		s.sizeclass = uint8(sizeclass)
		if sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.free)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	// h_spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h_spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h_spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h_spans orders the reads.
	unlock(&h.lock)
	return s
}
func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, sizeclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

func (h *mheap) allocStack(npage uintptr) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_allocstack not on g0 stack")
	}
	lock(&h.lock)
	s := h.allocSpanLocked(npage)
	if s != nil {
		s.state = _MSpanStack
		s.stackfreelist = 0
		s.allocCount = 0
		memstats.stacks_inuse += uint64(s.npages << _PageShift)
	}

	// This unlock acts as a release barrier. See mHeap_Alloc_m.
	unlock(&h.lock)
	return s
}

// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			goto HaveSpan
		}
	}

	// Best fit in list of large spans.
	list = &h.freelarge
	s = h.allocLarge(npage)
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	list.remove(s)
	if s.inList() {
		throw("still in list")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.base()+npage<<_PageShift, s.npages-npage)
		s.npages = npage
		p := (t.base() - h.arena_start) >> _PageShift
		if p > 0 {
			h_spans[p-1] = s
		}
		h_spans[p] = t
		h_spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanStack // prevent coalescing with s
		t.state = _MSpanStack
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := (s.base() - h.arena_start) >> _PageShift
	for n := uintptr(0); n < npage; n++ {
		h_spans[p+n] = s
	}

	memstats.heap_inuse += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
	if s.inList() {
		throw("still in list")
	}
	return s
}

// Allocate a span of at least npage pages from the list of large spans.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	return bestFit(&h.freelarge, npage, nil)
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
	for s := list.first; s != nil; s = s.next {
		if s.npages < npage {
			continue
		}
		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.base() < best.base()) {
			best = s
		}
	}
	return best
}
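/*
   Illustrative sketch (not part of the original source): bestFit scans the
   whole list and keeps the smallest span that still has >= npage pages,
   breaking ties by lowest start address. For example, with spans of 3, 8,
   5, and 5 pages on freelarge and npage = 4, the 5-page span with the lower
   base address is returned; the 3-page span is skipped entirely. Any excess
   pages are trimmed off later by allocSpanLocked.
*/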
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(uintptr(v), ask>>_PageShift)
	p := (s.base() - h.arena_start) >> _PageShift
	for i := p; i < p+s.npages; i++ {
		h_spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h_spans[p>>_PageShift]
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	s := h_spans[(uintptr(v)-h.arena_start)>>_PageShift]
	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

func (h *mheap) freeStack(s *mspan) {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("mheap_freestack not on g0 stack")
	}
	s.needzero = 1
	lock(&h.lock)
	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, true, true, 0)
	unlock(&h.lock)
}
// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanStack:
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := (s.base() - h.arena_start) >> _PageShift
	if p > 0 {
		t := h_spans[p-1]
		if t != nil && t.state == _MSpanFree {
			s.startAddr = t.startAddr
			s.npages += t.npages
			s.npreleased = t.npreleased // absorb released pages
			s.needzero |= t.needzero
			p -= t.npages
			h_spans[p] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}
	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
		t := h_spans[p+s.npages]
		if t != nil && t.state == _MSpanFree {
			s.npages += t.npages
			s.npreleased += t.npreleased
			s.needzero |= t.needzero
			h_spans[p+s.npages-1] = s
			h.freeList(t.npages).remove(t)
			t.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(t))
		}
	}

	// Insert s into appropriate list.
	h.freeList(s.npages).insert(s)
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages]
	}
	return &h.freelarge
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}
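/*
   Illustrative sketch (not part of the original source): freeSpanLocked
   coalesces by consulting the page-ID table for the immediate neighbors.
   With p = (s.base() - h.arena_start) >> _PageShift:

	h_spans[p-1]        // span owning the page just before s (if any)
	h_spans[p+s.npages] // span owning the page just after s (if any)

   If a neighbor exists and is _MSpanFree, s absorbs its pages, the
   neighbor's mspan is returned to spanalloc, and only then is the merged
   span put back on the appropriate free list.
*/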
func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
			start := s.base()
			end := start + s.npages<<_PageShift
			if sys.PhysPageSize > _PageSize {
				// We can only release pages in
				// PhysPageSize blocks, so round start
				// and end in. (Otherwise, madvise
				// will round them *out* and release
				// more memory than we want.)
				start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
				end &^= sys.PhysPageSize - 1
				if end <= start {
					// start and end don't span a
					// whole physical page.
					continue
				}
			}
			len := end - start

			released := len - (s.npreleased << _PageShift)
			if sys.PhysPageSize > _PageSize && released == 0 {
				continue
			}
			memstats.heap_released += uint64(released)
			sumreleased += released
			s.npreleased = len >> _PageShift
			sysUnused(unsafe.Pointer(start), len)
		}
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengelist(&h.freelarge, now, limit)
	unlock(&h.lock)

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
		// But we can't call ReadMemStats on g0 holding locks.
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gcStart(gcForceBlockMode, false)
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.sizeclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
}

func (span *mspan) inList() bool {
	return span.prev != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = &list.first
}
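/*
   Illustrative sketch (not part of the original source): the rounding done
   by scavengelist when the physical page size exceeds the runtime page
   size. Rounding start up and end down keeps madvise from releasing memory
   outside the span. Assuming (hypothetically) sys.PhysPageSize = 16384 and
   _PageSize = 8192:

	start := uintptr(0x10a000)                 // not 16K-aligned
	end := uintptr(0x112000)
	start = (start + 16384 - 1) &^ (16384 - 1) // 0x10c000, rounded up
	end &^= 16384 - 1                          // 0x110000, rounded down
	// Only [0x10c000, 0x110000) is handed to sysUnused.
*/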
func (list *mSpanList) remove(span *mspan) {
	if span.prev == nil || span.list != list {
		println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
		throw("MSpanList_Remove")
	}
	if span.next != nil {
		span.next.prev = span.prev
	} else {
		// TODO: After we remove the span.list != list check above,
		// we could at least still check list.last == &span.next here.
		list.last = span.prev
	}
	*span.prev = span.next
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		list.first.prev = &span.next
	} else {
		list.last = &span.next
	}
	list.first = span
	span.prev = &list.first
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.next = nil
	span.prev = list.last
	*list.last = span
	list.last = &span.next
	span.list = list
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}
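/*
   Illustrative sketch (not part of the original source): a span's specials
   list is kept sorted by (offset, kind), so addspecial walks it with a
   pointer-to-pointer until it finds either a duplicate record (same offset
   and kind, so the add fails) or the first record that should come after
   the new one (the splice point). removespecial below uses the same walk
   but matches only an exact offset, since it is used for finalizers
   attached to object bases.
*/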
// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
type specialfinalizer struct {
	special special
	fn      *funcval
	nret    uintptr
	fint    *_type
	ot      *ptrtype
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}
// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

type gcBits struct {
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte.
	next *gcBits
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBits
	next     *gcBits
	current  *gcBits
	previous *gcBits
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *uint8 {
	lock(&gcBitsArenas.lock)
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8
	if gcBitsArenas.next == nil ||
		gcBitsArenas.next.free+bytesNeeded > uintptr(len(gcBits{}.bits)) {
		// Allocate a new arena.
		fresh := newArena()
		fresh.next = gcBitsArenas.next
		gcBitsArenas.next = fresh
	}
	if gcBitsArenas.next.free >= gcBitsChunkBytes {
		println("runtime: gcBitsArenas.next.free=", gcBitsArenas.next.free, gcBitsChunkBytes)
		throw("markBits overflow")
	}
	result := &gcBitsArenas.next.bits[gcBitsArenas.next.free]
	gcBitsArenas.next.free += bytesNeeded
	unlock(&gcBitsArenas.lock)
	return result
}

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans not being initialized the
// mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *uint8 {
	return newMarkBits(nelems)
}
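/*
   Illustrative sketch (not part of the original source): the sizing done by
   newMarkBits. Bits are handed out in 8-byte (64-bit) blocks so the result
   stays 8-byte aligned and ctz-friendly for allocCache:

	nelems := uintptr(100)             // hypothetical object count
	blocksNeeded := (nelems + 63) / 64 // 2 blocks of 64 bits
	bytesNeeded := blocksNeeded * 8    // 16 bytes reserved in the arena
	_ = bytesNeeded
*/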
// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle which is demarcated by the call to finishsweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
// where the GC will mark objects and after each span is swept these bits
// will be used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
// gcAllocBits live until all the spans have been swept during this GC cycle.
// The span's sweep extinguishes all the references to gcBitsArenas.previous
// by pointing gcAllocBits into the gcBitsArenas.current.
// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	gcBitsArenas.next = nil // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}

// newArena allocates and zeroes a gcBits arena.
func newArena() *gcBits {
	var result *gcBits
	if gcBitsArenas.free == nil {
		result = (*gcBits)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclr(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if uintptr(unsafe.Offsetof(gcBits{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}
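/*
   Illustrative sketch (not part of the original source): the rotation
   performed by nextMarkBitArenaEpoch at the start of each sweep cycle.

	// previous -> free     (its bits are no longer referenced once sweeping ends)
	// current  -> previous
	// next     -> current
	// next     =  nil      (newMarkBits allocates a fresh arena on demand)

   Spans then populate gcBitsArenas.next during the cycle via newMarkBits
   and newAllocBits.
*/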