github.com/hbdrawn/golang@v0.0.0-20141214014649-6b835209aba2/src/runtime/mgc.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
     6  // It has gotten completely out of control.
     7  
     8  // Garbage collector (GC).
     9  //
    10  // The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple GC
    11  // threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
    12  // non-generational and non-compacting. Allocation is done using size segregated per P allocation
    13  // areas to minimize fragmentation while eliminating locks in the common case.
    14  //
    15  // The algorithm decomposes into several steps.
    16  // This is a high level description of the algorithm being used. For an overview of GC a good
    17  // place to start is Richard Jones' gchandbook.org.
    18  //
    19  // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
    20  // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
    21  // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
    22  // For journal quality proofs that these steps are complete, correct, and terminate see
    23  // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
    24  // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
    25  //
    26  //  0. Set phase = GCscan from GCoff.
    27  //  1. Wait for all P's to acknowledge phase change.
    28  //         At this point all goroutines have passed through a GC safepoint and
    29  //         know we are in the GCscan phase.
    30  //  2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
    31  //       (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
    32  //       Preempted goroutines are scanned before the P schedules the next goroutine.
    33  //  3. Set phase = GCmark.
    34  //  4. Wait for all P's to acknowledge phase change.
    35  //  5. Now the write barrier marks and enqueues pointers to white objects (the slot's object may be black, grey, or white).
    36  //       Malloc still allocates white (non-marked) objects.
    37  //  6. Meanwhile GC transitively walks the heap marking reachable objects.
    38  //  7. When GC finishes marking the heap, it preempts P's one-by-one and
    39  //       retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
    40  //       currently scheduled on the P).
    41  //  8. Once the GC has exhausted all available marking work it sets phase = marktermination.
    42  //  9. Wait for all P's to acknowledge phase change.
    43  // 10. Malloc now allocates black objects, so the number of unmarked reachable objects
    44  //        monotonically decreases.
    45  // 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
    46  // 12. When GC completes a full cycle over P's and discovers no new grey
    47  //         objects (which means all reachable objects are marked), set phase = GCsweep.
    48  // 13. Wait for all P's to acknowledge phase change.
    49  // 14. Now malloc allocates white (but sweeps spans before use).
    50  //         The write barrier becomes a nop.
    51  // 15. GC does background sweeping, see description below.
    52  // 16. When sweeping is complete set phase to GCoff.
    53  // 17. When sufficient allocation has taken place, replay the sequence starting at 0 above,
    54  //         see discussion of GC rate below.
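        //
        // As a compressed sketch of the cycle above (illustrative only; setGCPhase is a
        // hypothetical helper, not a function in this file), the phases are entered roughly as:
        //
        //	setGCPhase(_GCscan)            // steps 0-2: stacks and globals greyed
        //	setGCPhase(_GCmark)            // steps 3-7: write barrier on, concurrent mark
        //	setGCPhase(_GCmarktermination) // steps 8-12: allocate black, drain remaining work
        //	setGCPhase(_GCsweep)           // steps 13-16: write barrier off, concurrent sweep
        //	setGCPhase(_GCoff)             // step 17: wait until allocation triggers the next cycle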
    55  
    56  // Changing phases.
    57  // Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
    58  // All phase action must be benign in the presence of a change.
    59  // Starting with GCoff
    60  // GCoff to GCscan
    61  //     GCscan scans stacks and globals, greying them, and never marks an object black.
    62  //     Once all the P's are aware of the new phase they will scan gs on preemption.
    63  //     This means that the scanning of preempted gs can't start until all the Ps
    64  //     have acknowledged.
    65  // GCscan to GCmark
    66  //     GCmark turns on the write barrier which also only greys objects. No scanning
    67  //     of objects (making them black) can happen until all the Ps have acknowledged
    68  //     the phase change.
    69  // GCmark to GCmarktermination
    70  //     The only change here is that we start allocating black so the Ps must acknowledge
    71  //     the change before we begin the termination algorithm.
    72  // GCmarktermination to GCsweep
    73  //     Objects currently on the freelist must be marked black for this to work.
    74  //     Are things on the free lists black or white? How does the sweep phase work?
    75  
    76  // Concurrent sweep.
    77  // The sweep phase proceeds concurrently with normal program execution.
    78  // The heap is swept span-by-span both lazily (when a goroutine needs another span)
    79  // and concurrently in a background goroutine (this helps programs that are not CPU bound).
    80  // However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
    81  // and so next_gc calculation is tricky and happens as follows.
    82  // At the end of the stop-the-world phase next_gc is conservatively set based on total
    83  // heap size; all spans are marked as "needs sweeping".
    84  // Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
    85  // The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
    86  // closer to the target value. However, this is not enough to avoid over-allocating memory.
    87  // Consider that a goroutine wants to allocate a new span for a large object and
    88  // there are no free swept spans, but there are small-object unswept spans.
    89  // If the goroutine naively allocates a new span, it can surpass the yet-unknown
    90  // target next_gc value. In order to prevent such cases (1) when a goroutine needs
    91  // to allocate a new small-object span, it sweeps small-object spans for the same
    92  // object size until it frees at least one object; (2) when a goroutine needs to
    93  // allocate a large-object span from the heap, it sweeps spans until it frees at least
    94  // that many pages into the heap. Together these two measures ensure that we don't surpass
    95  // target next_gc value by a large margin. There is an exception: if a goroutine sweeps
    96  // and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
    97  // but there can still be other one-page unswept spans which could be combined into a two-page span.
    98  // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
    99  // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
   100  // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
   101  // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
   102  // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
   103  // The finalizer goroutine is kicked off only when all spans are swept.
   104  // When the next GC starts, it sweeps all not-yet-swept spans (if any).
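        //
        // A rough sketch of the small-object pacing rule above (illustrative only;
        // the helper names are hypothetical, not functions in this file):
        //
        //	// need a fresh span for size class c but none are free
        //	for {
        //		if sweepOneSpanOfClass(c) > 0 { // frees objects of the same size class
        //			break
        //		}
        //		// (the real code also stops once there is nothing left to sweep)
        //	}
        //	// now safe, pacing-wise, to grab a new span for class c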
   105  
   106  // GC rate.
   107  // Next GC is after we've allocated an extra amount of memory proportional to
   108  // the amount already in use. The proportion is controlled by GOGC environment variable
   109  // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
   110  // (this mark is tracked in next_gc variable). This keeps the GC cost in linear
   111  // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
   112  // (and also the amount of extra memory used).
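        //
        // As a worked example of that arithmetic (illustrative only), with heap_in_use
        // measured at the end of a collection:
        //
        //	next_gc = heap_in_use * (1 + GOGC/100)
        //	        = 4M * 2 = 8M   for GOGC=100
        //	        = 4M * 3 = 12M  for GOGC=200
        //
        // so raising GOGC proportionally raises the extra memory allowed between collections.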
   113  
   114  package runtime
   115  
   116  import "unsafe"
   117  
   118  const (
   119  	_DebugGC         = 0
   120  	_DebugGCPtrs     = false // if true, print trace of every pointer load during GC
   121  	_ConcurrentSweep = true
   122  
   123  	_WorkbufSize     = 4 * 1024
   124  	_FinBlockSize    = 4 * 1024
   125  	_RootData        = 0
   126  	_RootBss         = 1
   127  	_RootFinalizers  = 2
   128  	_RootSpans       = 3
   129  	_RootFlushCaches = 4
   130  	_RootCount       = 5
   131  )
   132  
   133  // ptrmask for an allocation containing a single pointer.
   134  var oneptr = [...]uint8{bitsPointer}
   135  
   136  // Initialized from $GOGC.  GOGC=off means no GC.
   137  var gcpercent int32
   138  
   139  // Holding worldsema grants an M the right to try to stop the world.
   140  // The procedure is:
   141  //
   142  //	semacquire(&worldsema);
   143  //	m.gcing = 1;
   144  //	stoptheworld();
   145  //
   146  //	... do stuff ...
   147  //
   148  //	m.gcing = 0;
   149  //	semrelease(&worldsema);
   150  //	starttheworld();
   151  //
   152  var worldsema uint32 = 1
   153  
   154  // It is a bug if bits does not have bitBoundary set, but
   155  // there are still some cases where this happens, related
   156  // to stack spans.
   157  type markbits struct {
   158  	bitp  *byte   // pointer to the byte holding xbits
   159  	shift uintptr // number of bits xbits must be shifted right to extract bits
   160  	xbits byte    // byte holding all the bits from *bitp
   161  	bits  byte    // mark and boundary bits relevant to corresponding slot.
   162  	tbits byte    // pointer||scalar bits relevant to corresponding slot.
   163  }
   164  
   165  type workbuf struct {
   166  	node lfnode // must be first
   167  	nobj uintptr
   168  	obj  [(_WorkbufSize - unsafe.Sizeof(lfnode{}) - ptrSize) / ptrSize]uintptr
   169  }
   170  
   171  var data, edata, bss, ebss, gcdata, gcbss struct{}
   172  
   173  var finlock mutex  // protects the following variables
   174  var fing *g        // goroutine that runs finalizers
   175  var finq *finblock // list of finalizers that are to be executed
   176  var finc *finblock // cache of free blocks
   177  var finptrmask [_FinBlockSize / ptrSize / pointersPerByte]byte
   178  var fingwait bool
   179  var fingwake bool
   180  var allfin *finblock // list of all blocks
   181  
   182  var gcdatamask bitvector
   183  var gcbssmask bitvector
   184  
   185  var gclock mutex
   186  
   187  var badblock [1024]uintptr
   188  var nbadblock int32
   189  
   190  type workdata struct {
   191  	full    uint64                // lock-free list of full blocks
   192  	empty   uint64                // lock-free list of empty blocks
   193  	partial uint64                // lock-free list of partially filled blocks
   194  	pad0    [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
   195  	nproc   uint32
   196  	tstart  int64
   197  	nwait   uint32
   198  	ndone   uint32
   199  	alldone note
   200  	markfor *parfor
   201  
   202  	// Copy of mheap.allspans for marker or sweeper.
   203  	spans []*mspan
   204  }
   205  
   206  var work workdata
   207  
   208  //go:linkname weak_cgo_allocate go.weak.runtime._cgo_allocate_internal
   209  var weak_cgo_allocate byte
   210  
   211  // Is _cgo_allocate linked into the binary?
   212  func have_cgo_allocate() bool {
   213  	return &weak_cgo_allocate != nil
   214  }
   215  
   216  // To help debug the concurrent GC we remark with the world
   217  // stopped ensuring that any object encountered has its normal
   218  // mark bit set. To do this we use an orthogonal bit
   219  // pattern to indicate the object is marked. The following pattern
   220  // uses the upper two bits in the object's boundary nibble.
   221  // 01: scalar  not marked
   222  // 10: pointer not marked
   223  // 11: pointer     marked
   224  // 00: scalar      marked
   225  // Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
   226  // The higher bit is 1 for pointers and 0 for scalars, whether the object
   227  // is marked or not.
   228  // The first nibble no longer holds the bitsDead pattern indicating that
   229  // there are no more pointers in the object. This information is held
   230  // in the second nibble.
   231  
   232  // When marking an object if the bool checkmark is true one uses the above
   233  // encoding, otherwise one uses the bitMarked bit in the lower two bits
   234  // of the nibble.
   235  var (
   236  	checkmark         = false
   237  	gccheckmarkenable = true
   238  )
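
        // For example (an illustrative note on the encoding above): an unmarked scalar
        // carries the type bits 01 and an unmarked pointer carries 10; flipping the low
        // bit of that two-bit pattern yields 00 (scalar marked) and 11 (pointer marked),
        // and flipping it again restores the unmarked encodings.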
   239  
   240  // Reports whether address b is in the known heap. If it doesn't have a valid gcmap
   241  // it returns false. For example, pointers into stacks will return false.
   242  func inheap(b uintptr) bool {
   243  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   244  		return false
   245  	}
   246  	// Not a beginning of a block, consult span table to find the block beginning.
   247  	k := b >> _PageShift
   248  	x := k
   249  	x -= mheap_.arena_start >> _PageShift
   250  	s := h_spans[x]
   251  	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
   252  		return false
   253  	}
   254  	return true
   255  }
   256  
   257  // Given an address in the heap, return the relevant byte from the gcmap. This routine
   258  // can be used on addresses to the start of an object or to the interior of an object.
   259  func slottombits(obj uintptr, mbits *markbits) {
   260  	off := (obj&^(ptrSize-1) - mheap_.arena_start) / ptrSize
   261  	mbits.bitp = (*byte)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
   262  	mbits.shift = off % wordsPerBitmapByte * gcBits
   263  	mbits.xbits = *mbits.bitp
   264  	mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
   265  	mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
   266  }
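
        // As a worked example (assuming the usual 64-bit layout: ptrSize=8,
        // wordsPerBitmapByte=2, gcBits=4), for the heap word at arena_start+16:
        //
        //	off   = (arena_start+16 - arena_start) / ptrSize  // = 2
        //	bitp  = arena_start - off/wordsPerBitmapByte - 1   // = arena_start - 2
        //	shift = off % wordsPerBitmapByte * gcBits          // = 0 (lower nibble)
        //
        // so the bitmap grows downward from arena_start, one byte describing two heap
        // words, with the mark/boundary bits in the low half of each nibble and the
        // pointer/scalar type bits in the high half.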
   267  
   268  // b is a pointer into the heap.
   269  // Find the start of the object referred to by b.
   270  // Set mbits to the associated bits from the bit map.
   271  // If b is not a valid heap object return nil and
   272  // undefined values in mbits.
   273  func objectstart(b uintptr, mbits *markbits) uintptr {
   274  	obj := b &^ (ptrSize - 1)
   275  	for {
   276  		slottombits(obj, mbits)
   277  		if mbits.bits&bitBoundary == bitBoundary {
   278  			break
   279  		}
   280  
   281  		// Not a beginning of a block, consult span table to find the block beginning.
   282  		k := b >> _PageShift
   283  		x := k
   284  		x -= mheap_.arena_start >> _PageShift
   285  		s := h_spans[x]
   286  		if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
   287  			if s != nil && s.state == _MSpanStack {
   288  				return 0 // This is legit.
   289  			}
   290  
   291  			// The following ensures that we are rigorous about what data
   292  			// structures hold valid pointers
   293  			if false {
   294  				// Still happens sometimes. We don't know why.
   295  				printlock()
   296  				print("runtime:objectstart Span weird: obj=", hex(obj), " k=", hex(k))
   297  				if s == nil {
   298  					print(" s=nil\n")
   299  				} else {
   300  					print(" s.start=", hex(s.start<<_PageShift), " s.limit=", hex(s.limit), " s.state=", s.state, "\n")
   301  				}
   302  				printunlock()
   303  				gothrow("objectstart: bad pointer in unexpected span")
   304  			}
   305  			return 0
   306  		}
   307  
   308  		p := uintptr(s.start) << _PageShift
   309  		if s.sizeclass != 0 {
   310  			size := s.elemsize
   311  			idx := (obj - p) / size
   312  			p = p + idx*size
   313  		}
   314  		if p == obj {
   315  			print("runtime: failed to find block beginning for ", hex(p), " s=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), "\n")
   316  			gothrow("failed to find block beginning")
   317  		}
   318  		obj = p
   319  	}
   320  
   321  	// if size(obj.firstfield) < PtrSize, then &obj.secondfield could map to the boundary bit
   322  	// Clear any low bits to get to the start of the object.
   323  	// greyobject depends on this.
   324  	return obj
   325  }
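
        // For example (illustrative numbers only): for an interior pointer b = p+110 into
        // a small-object span that starts at page address p with s.elemsize = 48, the
        // word-aligned candidate is p+104, idx = 104/48 = 2, and the loop resolves the
        // object start to p + 2*48 = p+96, whose bitmap nibble has bitBoundary set.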
   326  
   327  // Slow for now as we serialize this; since this is on a debug path,
   328  // speed is not critical at this point.
   329  var andlock mutex
   330  
   331  func atomicand8(src *byte, val byte) {
   332  	lock(&andlock)
   333  	*src &= val
   334  	unlock(&andlock)
   335  }
   336  
   337  // Mark using the checkmark scheme.
   338  func docheckmark(mbits *markbits) {
   339  	// xor 01 moves 01(scalar unmarked) to 00(scalar marked)
   340  	// and 10(pointer unmarked) to 11(pointer marked)
   341  	if mbits.tbits == _BitsScalar {
   342  		atomicand8(mbits.bitp, ^byte(_BitsCheckMarkXor<<mbits.shift<<2))
   343  	} else if mbits.tbits == _BitsPointer {
   344  		atomicor8(mbits.bitp, byte(_BitsCheckMarkXor<<mbits.shift<<2))
   345  	}
   346  
   347  	// reload bits for ischeckmarked
   348  	mbits.xbits = *mbits.bitp
   349  	mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
   350  	mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
   351  }
   352  
   353  // In the default scheme, reports whether mbits refers to a marked object.
   354  func ismarked(mbits *markbits) bool {
   355  	if mbits.bits&bitBoundary != bitBoundary {
   356  		gothrow("ismarked: bits should have boundary bit set")
   357  	}
   358  	return mbits.bits&bitMarked == bitMarked
   359  }
   360  
   361  // In the checkmark scheme, reports whether mbits refers to a marked object.
   362  func ischeckmarked(mbits *markbits) bool {
   363  	if mbits.bits&bitBoundary != bitBoundary {
   364  		gothrow("ischeckmarked: bits should have boundary bit set")
   365  	}
   366  	return mbits.tbits == _BitsScalarMarked || mbits.tbits == _BitsPointerMarked
   367  }
   368  
   369  // When in GCmarkterminate phase we allocate black.
   370  func gcmarknewobject_m(obj uintptr) {
   371  	if gcphase != _GCmarktermination {
   372  		gothrow("marking new object while not in mark termination phase")
   373  	}
   374  	if checkmark { // The world should be stopped so this should not happen.
   375  		gothrow("gcmarknewobject called while doing checkmark")
   376  	}
   377  
   378  	var mbits markbits
   379  	slottombits(obj, &mbits)
   380  	if mbits.bits&bitMarked != 0 {
   381  		return
   382  	}
   383  
   384  	// Each byte of GC bitmap holds info for two words.
   385  	// If the current object is larger than two words, or if the object is one word
   386  	// but the object it shares the byte with is already marked,
   387  	// then all the possible concurrent updates are trying to set the same bit,
   388  	// so we can use a non-atomic update.
   389  	if mbits.xbits&(bitMask|(bitMask<<gcBits)) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
   390  		*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
   391  	} else {
   392  		atomicor8(mbits.bitp, bitMarked<<mbits.shift)
   393  	}
   394  }
   395  
   396  // obj is the start of an object with mark mbits.
   397  // If it isn't already marked, mark it and enqueue into workbuf.
   398  // Return possibly new workbuf to use.
   399  func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
   400  	// obj should be start of allocation, and so must be at least pointer-aligned.
   401  	if obj&(ptrSize-1) != 0 {
   402  		gothrow("greyobject: obj not pointer-aligned")
   403  	}
   404  
   405  	if checkmark {
   406  		if !ismarked(mbits) {
   407  			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), ", mbits->bits=", hex(mbits.bits), " *mbits->bitp=", hex(*mbits.bitp), "\n")
   408  
   409  			k := obj >> _PageShift
   410  			x := k
   411  			x -= mheap_.arena_start >> _PageShift
   412  			s := h_spans[x]
   413  			printlock()
   414  			print("runtime:greyobject Span: obj=", hex(obj), " k=", hex(k))
   415  			if s == nil {
   416  				print(" s=nil\n")
   417  			} else {
   418  				print(" s.start=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
   419  				// NOTE(rsc): This code is using s.sizeclass as an approximation of the
   420  				// number of pointer-sized words in an object. Perhaps not what was intended.
   421  				for i := 0; i < int(s.sizeclass); i++ {
   422  					print(" *(obj+", i*ptrSize, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)*ptrSize))), "\n")
   423  				}
   424  			}
   425  			gothrow("checkmark found unmarked object")
   426  		}
   427  		if ischeckmarked(mbits) {
   428  			return wbuf
   429  		}
   430  		docheckmark(mbits)
   431  		if !ischeckmarked(mbits) {
   432  			print("mbits xbits=", hex(mbits.xbits), " bits=", hex(mbits.bits), " tbits=", hex(mbits.tbits), " shift=", mbits.shift, "\n")
   433  			gothrow("docheckmark and ischeckmarked disagree")
   434  		}
   435  	} else {
   436  		// If marked we have nothing to do.
   437  		if mbits.bits&bitMarked != 0 {
   438  			return wbuf
   439  		}
   440  
   441  		// Each byte of GC bitmap holds info for two words.
   442  		// If the current object is larger than two words, or if the object is one word
   443  		// but the object it shares the byte with is already marked,
   444  		// then all the possible concurrent updates are trying to set the same bit,
   445  		// so we can use a non-atomic update.
   446  		if mbits.xbits&(bitMask|bitMask<<gcBits) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
   447  			*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
   448  		} else {
   449  			atomicor8(mbits.bitp, bitMarked<<mbits.shift)
   450  		}
   451  	}
   452  
   453  	if !checkmark && (mbits.xbits>>(mbits.shift+2))&_BitsMask == _BitsDead {
   454  		return wbuf // noscan object
   455  	}
   456  
   457  	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
   458  	// seems like a nice optimization that can be added back in.
   459  	// There needs to be time between the PREFETCH and the use.
   460  	// Previously we put the obj in an 8-element buffer that is drained at a rate
   461  	// to give the PREFETCH time to do its work.
   462  	// Use of PREFETCHNTA might be more appropriate than PREFETCH.
   463  
   464  	// If workbuf is full, obtain an empty one.
   465  	if wbuf.nobj >= uintptr(len(wbuf.obj)) {
   466  		wbuf = getempty(wbuf)
   467  	}
   468  
   469  	wbuf.obj[wbuf.nobj] = obj
   470  	wbuf.nobj++
   471  	return wbuf
   472  }
   473  
   474  // Scan the object b of size n, adding pointers to wbuf.
   475  // Return possibly new wbuf to use.
   476  // If ptrmask != nil, it specifies where pointers are in b.
   477  // If ptrmask == nil, the GC bitmap should be consulted.
   478  // In this case, n may be an overestimate of the size; the GC bitmap
   479  // must also be used to make sure the scan stops at the end of b.
   480  func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
   481  	arena_start := mheap_.arena_start
   482  	arena_used := mheap_.arena_used
   483  
   484  	// Find bits of the beginning of the object.
   485  	var ptrbitp unsafe.Pointer
   486  	var mbits markbits
   487  	if ptrmask == nil {
   488  		b = objectstart(b, &mbits)
   489  		if b == 0 {
   490  			return wbuf
   491  		}
   492  		ptrbitp = unsafe.Pointer(mbits.bitp)
   493  	}
   494  	for i := uintptr(0); i < n; i += ptrSize {
   495  		// Find bits for this word.
   496  		var bits uintptr
   497  		if ptrmask != nil {
   498  			// dense mask (stack or data)
   499  			bits = (uintptr(*(*byte)(add(unsafe.Pointer(ptrmask), (i/ptrSize)/4))) >> (((i / ptrSize) % 4) * bitsPerPointer)) & bitsMask
   500  		} else {
   501  			// Check if we have reached end of span.
   502  			// n is an overestimate of the size of the object.
   503  			if (b+i)%_PageSize == 0 && h_spans[(b-arena_start)>>_PageShift] != h_spans[(b+i-arena_start)>>_PageShift] {
   504  				break
   505  			}
   506  
   507  			// Consult GC bitmap.
   508  			bits = uintptr(*(*byte)(ptrbitp))
   509  			if wordsPerBitmapByte != 2 {
   510  				gothrow("alg doesn't work for wordsPerBitmapByte != 2")
   511  			}
   512  			j := (uintptr(b) + i) / ptrSize & 1 // j indicates upper nibble or lower nibble
   513  			bits >>= gcBits * j
   514  			if i == 0 {
   515  				bits &^= bitBoundary
   516  			}
   517  			ptrbitp = add(ptrbitp, -j)
   518  
   519  			if bits&bitBoundary != 0 && i != 0 {
   520  				break // reached beginning of the next object
   521  			}
   522  			bits = (bits & bitPtrMask) >> 2 // bits refer to the type bits.
   523  
   524  			if i != 0 && bits == bitsDead { // BitsDead in first nibble not valid during checkmark
   525  				break // reached no-scan part of the object
   526  			}
   527  		}
   528  
   529  		if bits <= _BitsScalar { // _BitsScalar, _BitsDead, _BitsScalarMarked
   530  			continue
   531  		}
   532  
   533  		if bits&_BitsPointer != _BitsPointer {
   534  			print("gc checkmark=", checkmark, " b=", hex(b), " ptrmask=", ptrmask, " mbits.bitp=", mbits.bitp, " mbits.xbits=", hex(mbits.xbits), " bits=", hex(bits), "\n")
   535  			gothrow("unexpected garbage collection bits")
   536  		}
   537  
   538  		obj := *(*uintptr)(unsafe.Pointer(b + i))
   539  
   540  		// At this point we have extracted the next potential pointer.
   541  		// Check if it points into heap.
   542  		if obj == 0 || obj < arena_start || obj >= arena_used {
   543  			continue
   544  		}
   545  
   546  		// Mark the object. Return some important bits.
   547  		// If we combine the following two routines we don't have to pass mbits or obj around.
   548  		var mbits markbits
   549  		obj = objectstart(obj, &mbits)
   550  		if obj == 0 {
   551  			continue
   552  		}
   553  		wbuf = greyobject(obj, &mbits, wbuf)
   554  	}
   555  	return wbuf
   556  }
   557  
   558  // scanblock starts by scanning b as scanobject would.
   559  // If the gcphase is GCscan, that's all scanblock does.
   560  // Otherwise it traverses some fraction of the pointers it found in b, recursively.
   561  // As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
   562  // stopping only when no work is left in the system.
   563  func scanblock(b, n uintptr, ptrmask *uint8) {
   564  	wbuf := getpartialorempty()
   565  	if b != 0 {
   566  		wbuf = scanobject(b, n, ptrmask, wbuf)
   567  		if gcphase == _GCscan {
   568  			if inheap(b) && ptrmask == nil {
   569  				// b is in heap, we are in GCscan so there should be a ptrmask.
   570  				gothrow("scanblock: In GCscan phase and inheap is true.")
   571  			}
   572  			// GCscan only goes one level deep since mark wb not turned on.
   573  			putpartial(wbuf)
   574  			return
   575  		}
   576  	}
   577  	if gcphase == _GCscan {
   578  		gothrow("scanblock: In GCscan phase but no b passed in.")
   579  	}
   580  
   581  	keepworking := b == 0
   582  
   583  	// ptrmask can have 2 possible values:
   584  	// 1. nil - obtain pointer mask from GC bitmap.
   585  	// 2. pointer to a compact mask (for stacks and data).
   586  	for {
   587  		if wbuf.nobj == 0 {
   588  			if !keepworking {
   589  				putempty(wbuf)
   590  				return
   591  			}
   592  			// Refill workbuf from global queue.
   593  			wbuf = getfull(wbuf)
   594  			if wbuf == nil { // nil means out of work barrier reached
   595  				return
   596  			}
   597  
   598  			if wbuf.nobj <= 0 {
   599  				gothrow("runtime:scanblock getfull returns empty buffer")
   600  			}
   601  		}
   602  
   603  		// If another proc wants a pointer, give it some.
   604  		if work.nwait > 0 && wbuf.nobj > 4 && work.full == 0 {
   605  			wbuf = handoff(wbuf)
   606  		}
   607  
   608  		// This might be a good place to add prefetch code...
   609  		// if(wbuf->nobj > 4) {
   610  		//         PREFETCH(wbuf->obj[wbuf->nobj - 3];
   611  		//  }
   612  		wbuf.nobj--
   613  		b = wbuf.obj[wbuf.nobj]
   614  		wbuf = scanobject(b, mheap_.arena_used-b, nil, wbuf)
   615  	}
   616  }
   617  
   618  func markroot(desc *parfor, i uint32) {
   619  	// Note: if you add a case here, please also update heapdump.c:dumproots.
   620  	switch i {
   621  	case _RootData:
   622  		scanblock(uintptr(unsafe.Pointer(&data)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)), gcdatamask.bytedata)
   623  
   624  	case _RootBss:
   625  		scanblock(uintptr(unsafe.Pointer(&bss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)), gcbssmask.bytedata)
   626  
   627  	case _RootFinalizers:
   628  		for fb := allfin; fb != nil; fb = fb.alllink {
   629  			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0])
   630  		}
   631  
   632  	case _RootSpans:
   633  		// mark MSpan.specials
   634  		sg := mheap_.sweepgen
   635  		for spanidx := uint32(0); spanidx < uint32(len(work.spans)); spanidx++ {
   636  			s := work.spans[spanidx]
   637  			if s.state != mSpanInUse {
   638  				continue
   639  			}
   640  			if !checkmark && s.sweepgen != sg {
   641  				// sweepgen was updated (+2) during non-checkmark GC pass
   642  				print("sweep ", s.sweepgen, " ", sg, "\n")
   643  				gothrow("gc: unswept span")
   644  			}
   645  			for sp := s.specials; sp != nil; sp = sp.next {
   646  				if sp.kind != _KindSpecialFinalizer {
   647  					continue
   648  				}
   649  				// don't mark finalized object, but scan it so we
   650  				// retain everything it points to.
   651  				spf := (*specialfinalizer)(unsafe.Pointer(sp))
   652  				// A finalizer can be set for an inner byte of an object, find object beginning.
   653  				p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   654  				if gcphase != _GCscan {
   655  					scanblock(p, s.elemsize, nil) // scanned during mark phase
   656  				}
   657  				scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptr[0])
   658  			}
   659  		}
   660  
   661  	case _RootFlushCaches:
   662  		if gcphase != _GCscan { // Do not flush mcaches during GCscan phase.
   663  			flushallmcaches()
   664  		}
   665  
   666  	default:
   667  		// the rest is scanning goroutine stacks
   668  		if uintptr(i-_RootCount) >= allglen {
   669  			gothrow("markroot: bad index")
   670  		}
   671  		gp := allgs[i-_RootCount]
   672  
   673  		// remember when we've first observed the G blocked
   674  		// needed only to output in traceback
   675  		status := readgstatus(gp) // We are not in a scan state
   676  		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
   677  			gp.waitsince = work.tstart
   678  		}
   679  
   680  		// Shrink a stack if not much of it is being used but not in the scan phase.
   681  		if gcphase != _GCscan { // Do not shrink during GCscan phase.
   682  			shrinkstack(gp)
   683  		}
   684  		if readgstatus(gp) == _Gdead {
   685  			gp.gcworkdone = true
   686  		} else {
   687  			gp.gcworkdone = false
   688  		}
   689  		restart := stopg(gp)
   690  
   691  		// goroutine will scan its own stack when it stops running.
   692  		// Wait until it has.
   693  		for readgstatus(gp) == _Grunning && !gp.gcworkdone {
   694  		}
   695  
   696  		// scanstack(gp) is done as part of gcphasework
   697  		// But to make sure we finished we need to make sure that
   698  		// the stack traps have all responded so drop into
   699  		// this while loop until they respond.
   700  		for !gp.gcworkdone {
   701  			status = readgstatus(gp)
   702  			if status == _Gdead {
   703  				gp.gcworkdone = true // scan is a noop
   704  				break
   705  			}
   706  			if status == _Gwaiting || status == _Grunnable {
   707  				restart = stopg(gp)
   708  			}
   709  		}
   710  		if restart {
   711  			restartg(gp)
   712  		}
   713  	}
   714  }
   715  
   716  // Get an empty work buffer off the work.empty list,
   717  // allocating new buffers as needed.
   718  func getempty(b *workbuf) *workbuf {
   719  	if b != nil {
   720  		putfull(b)
   721  		b = nil
   722  	}
   723  	if work.empty != 0 {
   724  		b = (*workbuf)(lfstackpop(&work.empty))
   725  	}
   726  	if b != nil && b.nobj != 0 {
   727  		_g_ := getg()
   728  		print("m", _g_.m.id, ": getempty: popped b=", b, " with non-zero b.nobj=", b.nobj, "\n")
   729  		gothrow("getempty: workbuffer not empty, b->nobj not 0")
   730  	}
   731  	if b == nil {
   732  		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
   733  		b.nobj = 0
   734  	}
   735  	return b
   736  }
   737  
   738  func putempty(b *workbuf) {
   739  	if b.nobj != 0 {
   740  		gothrow("putempty: b->nobj not 0")
   741  	}
   742  	lfstackpush(&work.empty, &b.node)
   743  }
   744  
   745  func putfull(b *workbuf) {
   746  	if b.nobj <= 0 {
   747  		gothrow("putfull: b->nobj <= 0")
   748  	}
   749  	lfstackpush(&work.full, &b.node)
   750  }
   751  
   752  // Get a partially empty work buffer;
   753  // if none are available get an empty one.
   754  func getpartialorempty() *workbuf {
   755  	b := (*workbuf)(lfstackpop(&work.partial))
   756  	if b == nil {
   757  		b = getempty(nil)
   758  	}
   759  	return b
   760  }
   761  
   762  func putpartial(b *workbuf) {
   763  	if b.nobj == 0 {
   764  		lfstackpush(&work.empty, &b.node)
   765  	} else if b.nobj < uintptr(len(b.obj)) {
   766  		lfstackpush(&work.partial, &b.node)
   767  	} else if b.nobj == uintptr(len(b.obj)) {
   768  		lfstackpush(&work.full, &b.node)
   769  	} else {
   770  		print("b=", b, " b.nobj=", b.nobj, " len(b.obj)=", len(b.obj), "\n")
   771  		gothrow("putpartial: bad Workbuf b.nobj")
   772  	}
   773  }
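
        // A typical producer pattern using these helpers (illustrative; it mirrors what
        // shade does further below): take a buffer, grey into it, and hand it back to
        // whichever list matches how full it ended up.
        //
        //	wbuf := getpartialorempty()
        //	wbuf = greyobject(obj, &mbits, wbuf) // may swap in a fresh buffer when full
        //	putpartial(wbuf)                     // goes to the empty, partial, or full list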
   774  
   775  // Get a full work buffer off the work.full or a partially
   776  // filled one off the work.partial list. If nothing is available
   777  // wait until all the other gc helpers have finished and then
   778  // return nil.
   779  // getfull acts as a barrier for work.nproc helpers. As long as one
   780  // gchelper is actively marking objects it
   781  // may create a workbuffer that the other helpers can work on.
   782  // The for loop either exits when a work buffer is found
   783  // or when _all_ of the work.nproc GC helpers are in the loop
   784  // looking for work and thus not capable of creating new work.
   785  // This is in fact the termination condition for the STW mark
   786  // phase.
   787  func getfull(b *workbuf) *workbuf {
   788  	if b != nil {
   789  		putempty(b)
   790  	}
   791  
   792  	b = (*workbuf)(lfstackpop(&work.full))
   793  	if b == nil {
   794  		b = (*workbuf)(lfstackpop(&work.partial))
   795  	}
   796  	if b != nil || work.nproc == 1 {
   797  		return b
   798  	}
   799  
   800  	xadd(&work.nwait, +1)
   801  	for i := 0; ; i++ {
   802  		if work.full != 0 {
   803  			xadd(&work.nwait, -1)
   804  			b = (*workbuf)(lfstackpop(&work.full))
   805  			if b == nil {
   806  				b = (*workbuf)(lfstackpop(&work.partial))
   807  			}
   808  			if b != nil {
   809  				return b
   810  			}
   811  			xadd(&work.nwait, +1)
   812  		}
   813  		if work.nwait == work.nproc {
   814  			return nil
   815  		}
   816  		_g_ := getg()
   817  		if i < 10 {
   818  			_g_.m.gcstats.nprocyield++
   819  			procyield(20)
   820  		} else if i < 20 {
   821  			_g_.m.gcstats.nosyield++
   822  			osyield()
   823  		} else {
   824  			_g_.m.gcstats.nsleep++
   825  			usleep(100)
   826  		}
   827  	}
   828  }
   829  
   830  func handoff(b *workbuf) *workbuf {
   831  	// Make new buffer with half of b's pointers.
   832  	b1 := getempty(nil)
   833  	n := b.nobj / 2
   834  	b.nobj -= n
   835  	b1.nobj = n
   836  	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), n*unsafe.Sizeof(b1.obj[0]))
   837  	_g_ := getg()
   838  	_g_.m.gcstats.nhandoff++
   839  	_g_.m.gcstats.nhandoffcnt += uint64(n)
   840  
   841  	// Put b on full list - let first half of b get stolen.
   842  	lfstackpush(&work.full, &b.node)
   843  	return b1
   844  }
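
        // For example (illustrative numbers): with b.nobj = 10, handoff moves the last
        // n = 5 entries (b.obj[5:10]) into a fresh buffer b1, pushes b (now holding the
        // first 5) onto work.full for other helpers to steal, and returns b1 to the caller.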
   845  
   846  func stackmapdata(stkmap *stackmap, n int32) bitvector {
   847  	if n < 0 || n >= stkmap.n {
   848  		gothrow("stackmapdata: index out of range")
   849  	}
   850  	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
   851  }
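
        // For example (illustrative arithmetic): a stack map with stkmap.nbit = 70 rounds
        // each row up to (70+31)/32 = 3 32-bit words, i.e. 12 bytes, so row n starts at
        // byte offset n*12 from &stkmap.bytedata.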
   852  
   853  // Scan a stack frame: local variables and function arguments/results.
   854  func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
   855  
   856  	f := frame.fn
   857  	targetpc := frame.continpc
   858  	if targetpc == 0 {
   859  		// Frame is dead.
   860  		return true
   861  	}
   862  	if _DebugGC > 1 {
   863  		print("scanframe ", gofuncname(f), "\n")
   864  	}
   865  	if targetpc != f.entry {
   866  		targetpc--
   867  	}
   868  	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
   869  	if pcdata == -1 {
   870  		// We do not have a valid pcdata value but there might be a
   871  		// stackmap for this function.  It is likely that we are looking
   872  		// at the function prologue, assume so and hope for the best.
   873  		pcdata = 0
   874  	}
   875  
   876  	// Scan local variables if stack frame has been allocated.
   877  	size := frame.varp - frame.sp
   878  	var minsize uintptr
   879  	if thechar != '6' && thechar != '8' {
   880  		minsize = ptrSize
   881  	} else {
   882  		minsize = 0
   883  	}
   884  	if size > minsize {
   885  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
   886  		if stkmap == nil || stkmap.n <= 0 {
   887  			print("runtime: frame ", gofuncname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
   888  			gothrow("missing stackmap")
   889  		}
   890  
   891  		// Locals bitmap information, scan just the pointers in locals.
   892  		if pcdata < 0 || pcdata >= stkmap.n {
   893  			// don't know where we are
   894  			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
   895  			gothrow("scanframe: bad symbol table")
   896  		}
   897  		bv := stackmapdata(stkmap, pcdata)
   898  		size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
   899  		scanblock(frame.varp-size, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
   900  	}
   901  
   902  	// Scan arguments.
   903  	if frame.arglen > 0 {
   904  		var bv bitvector
   905  		if frame.argmap != nil {
   906  			bv = *frame.argmap
   907  		} else {
   908  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   909  			if stkmap == nil || stkmap.n <= 0 {
   910  				print("runtime: frame ", gofuncname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
   911  				gothrow("missing stackmap")
   912  			}
   913  			if pcdata < 0 || pcdata >= stkmap.n {
   914  				// don't know where we are
   915  				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
   916  				gothrow("scanframe: bad symbol table")
   917  			}
   918  			bv = stackmapdata(stkmap, pcdata)
   919  		}
   920  		scanblock(frame.argp, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
   921  	}
   922  	return true
   923  }
   924  
   925  func scanstack(gp *g) {
   926  
   927  	if readgstatus(gp)&_Gscan == 0 {
   928  		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
   929  		gothrow("scanstack - bad status")
   930  	}
   931  
   932  	switch readgstatus(gp) &^ _Gscan {
   933  	default:
   934  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   935  		gothrow("mark - bad status")
   936  	case _Gdead:
   937  		return
   938  	case _Grunning:
   939  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   940  		gothrow("scanstack: goroutine not stopped")
   941  	case _Grunnable, _Gsyscall, _Gwaiting:
   942  		// ok
   943  	}
   944  
   945  	if gp == getg() {
   946  		gothrow("can't scan our own stack")
   947  	}
   948  	mp := gp.m
   949  	if mp != nil && mp.helpgc != 0 {
   950  		gothrow("can't scan gchelper stack")
   951  	}
   952  
   953  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
   954  	tracebackdefers(gp, scanframe, nil)
   955  }
   956  
   957  // If the slot is grey or black return true, if white return false.
   958  // If the slot is not in the known heap and thus does not have a valid GC bitmap then
   959  // it is considered grey. Globals and stacks can hold such slots.
   960  // The slot is grey if its mark bit is set and it is enqueued to be scanned.
   961  // The slot is black if it has already been scanned.
   962  // It is white if it has a valid mark bit and the bit is not set.
   963  func shaded(slot uintptr) bool {
   964  	if !inheap(slot) { // non-heap slots considered grey
   965  		return true
   966  	}
   967  
   968  	var mbits markbits
   969  	valid := objectstart(slot, &mbits)
   970  	if valid == 0 {
   971  		return true
   972  	}
   973  
   974  	if checkmark {
   975  		return ischeckmarked(&mbits)
   976  	}
   977  
   978  	return mbits.bits&bitMarked != 0
   979  }
   980  
   981  // Shade the object if it isn't already.
   982  // The object is not nil and known to be in the heap.
   983  func shade(b uintptr) {
   984  	if !inheap(b) {
   985  		gothrow("shade: passed an address not in the heap")
   986  	}
   987  
   988  	wbuf := getpartialorempty()
   989  	// Mark the object, return some important bits.
   990  	// If we combine the following two routines we don't have to pass mbits or obj around.
   991  	var mbits markbits
   992  	obj := objectstart(b, &mbits)
   993  	if obj != 0 {
   994  		wbuf = greyobject(obj, &mbits, wbuf) // augments the wbuf
   995  	}
   996  	putpartial(wbuf)
   997  }
   998  
   999  // This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
  1000  // The original Dijkstra barrier only shaded ptrs being placed in black slots.
  1001  //
  1002  // Shade indicates that it has seen a white pointer by adding the referent
  1003  // to wbuf as well as marking it.
  1004  //
  1005  // slot is the destination (dst) in go code
  1006  // ptr is the value that goes into the slot (src) in the go code
  1007  //
  1008  // Dijkstra pointed out that maintaining the no-black-to-white-pointers
  1009  // invariant means that white-to-white pointers need not
  1010  // be noted by the write barrier. Furthermore, if either
  1011  // white object dies before it is reached by the
  1012  // GC then the object can be collected during this GC cycle
  1013  // instead of waiting for the next cycle. Unfortunately the cost of
  1014  // ensuring that the object holding the slot doesn't concurrently
  1015  // change to black without the mutator noticing seems prohibitive.
  1016  //
  1017  // Consider the following example where the mutator writes into
  1018  // a slot and then loads the slot's mark bit while the GC thread
  1019  // writes to the slot's mark bit and then as part of scanning reads
  1020  // the slot.
  1021  //
  1022  // Initially both [slot] and [slotmark] are 0 (nil)
  1023  // Mutator thread          GC thread
  1024  // st [slot], ptr          st [slotmark], 1
  1025  //
  1026  // ld r1, [slotmark]       ld r2, [slot]
  1027  //
  1028  // This is a classic example of independent reads of independent writes,
  1029  // aka IRIW. The question is if r1==r2==0 is allowed and for most HW the
  1030  // answer is yes without inserting memory barriers between the st and the ld.
  1031  // These barriers are expensive so we have decided that we will
  1032  // always grey the ptr object regardless of the slot's color.
  1033  func gcmarkwb_m(slot *uintptr, ptr uintptr) {
  1034  	switch gcphase {
  1035  	default:
  1036  		gothrow("gcphasework in bad gcphase")
  1037  
  1038  	case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
  1039  		// ok
  1040  
  1041  	case _GCmark, _GCmarktermination:
  1042  		if ptr != 0 && inheap(ptr) {
  1043  			shade(ptr)
  1044  		}
  1045  	}
  1046  }
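
        // An illustrative sketch (hypothetical helper, not code in this file) of how a
        // compiler-inserted pointer store would use gcmarkwb_m: shade the incoming
        // pointer, then perform the store.
        //
        //	// *slot = ptr, with the barrier applied
        //	func writebarrierptr_sketch(slot *uintptr, ptr uintptr) {
        //		gcmarkwb_m(slot, ptr) // greys ptr's object during _GCmark/_GCmarktermination
        //		*slot = ptr
        //	}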
  1047  
  1048  // The gp has been moved to a GC safepoint. GC phase specific
  1049  // work is done here.
  1050  func gcphasework(gp *g) {
  1051  	switch gcphase {
  1052  	default:
  1053  		gothrow("gcphasework in bad gcphase")
  1054  	case _GCoff, _GCquiesce, _GCstw, _GCsweep:
  1055  		// No work.
  1056  	case _GCscan:
  1057  		// scan the stack, mark the objects, put pointers in work buffers
  1058  		// hanging off the P where this is being run.
  1059  		scanstack(gp)
  1060  	case _GCmark:
  1061  		// No work.
  1062  	case _GCmarktermination:
  1063  		scanstack(gp)
  1064  		// All available mark work will be emptied before returning.
  1065  	}
  1066  	gp.gcworkdone = true
  1067  }
  1068  
  1069  var finalizer1 = [...]byte{
  1070  	// Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
  1071  	// Each byte describes 4 words.
  1072  	// Need 4 Finalizers described by 5 bytes before pattern repeats:
  1073  	//	ptr ptr uintptr ptr ptr
  1074  	//	ptr ptr uintptr ptr ptr
  1075  	//	ptr ptr uintptr ptr ptr
  1076  	//	ptr ptr uintptr ptr ptr
  1077  	// aka
  1078  	//	ptr ptr uintptr ptr
  1079  	//	ptr ptr ptr uintptr
  1080  	//	ptr ptr ptr ptr
  1081  	//	uintptr ptr ptr ptr
  1082  	//	ptr uintptr ptr ptr
  1083  	// Assumptions about Finalizer layout checked below.
  1084  	bitsPointer | bitsPointer<<2 | bitsScalar<<4 | bitsPointer<<6,
  1085  	bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsScalar<<6,
  1086  	bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
  1087  	bitsScalar | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
  1088  	bitsPointer | bitsScalar<<2 | bitsPointer<<4 | bitsPointer<<6,
  1089  }
  1090  
  1091  func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
  1092  	lock(&finlock)
  1093  	if finq == nil || finq.cnt == finq.cap {
  1094  		if finc == nil {
  1095  			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
  1096  			finc.cap = int32((_FinBlockSize-unsafe.Sizeof(finblock{}))/unsafe.Sizeof(finalizer{}) + 1)
  1097  			finc.alllink = allfin
  1098  			allfin = finc
  1099  			if finptrmask[0] == 0 {
  1100  				// Build pointer mask for Finalizer array in block.
  1101  				// Check assumptions made in finalizer1 array above.
  1102  				if (unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
  1103  					unsafe.Offsetof(finalizer{}.fn) != 0 ||
  1104  					unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
  1105  					unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize ||
  1106  					unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
  1107  					unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
  1108  					bitsPerPointer != 2) {
  1109  					gothrow("finalizer out of sync")
  1110  				}
  1111  				for i := range finptrmask {
  1112  					finptrmask[i] = finalizer1[i%len(finalizer1)]
  1113  				}
  1114  			}
  1115  		}
  1116  		block := finc
  1117  		finc = block.next
  1118  		block.next = finq
  1119  		finq = block
  1120  	}
  1121  	f := (*finalizer)(add(unsafe.Pointer(&finq.fin[0]), uintptr(finq.cnt)*unsafe.Sizeof(finq.fin[0])))
  1122  	finq.cnt++
  1123  	f.fn = fn
  1124  	f.nret = nret
  1125  	f.fint = fint
  1126  	f.ot = ot
  1127  	f.arg = p
  1128  	fingwake = true
  1129  	unlock(&finlock)
  1130  }
  1131  
  1132  func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
  1133  	for fb := allfin; fb != nil; fb = fb.alllink {
  1134  		for i := int32(0); i < fb.cnt; i++ {
  1135  			f := &fb.fin[i]
  1136  			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
  1137  		}
  1138  	}
  1139  }
  1140  
  1141  // Returns only when span s has been swept.
  1142  func mSpan_EnsureSwept(s *mspan) {
  1143  	// Caller must disable preemption.
  1144  	// Otherwise when this function returns the span can become unswept again
  1145  	// (if GC is triggered on another goroutine).
  1146  	_g_ := getg()
  1147  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
  1148  		gothrow("MSpan_EnsureSwept: m is not locked")
  1149  	}
  1150  
  1151  	sg := mheap_.sweepgen
  1152  	if atomicload(&s.sweepgen) == sg {
  1153  		return
  1154  	}
  1155  	// The caller must be sure that the span is a MSpanInUse span.
  1156  	if cas(&s.sweepgen, sg-2, sg-1) {
  1157  		mSpan_Sweep(s, false)
  1158  		return
  1159  	}
  1160  	// unfortunate condition, and we don't have efficient means to wait
  1161  	for atomicload(&s.sweepgen) != sg {
  1162  		osyield()
  1163  	}
  1164  }
  1165  
  1166  // Sweep frees or collects finalizers for blocks not marked in the mark phase.
  1167  // It clears the mark bits in preparation for the next GC round.
  1168  // Returns true if the span was returned to heap.
  1169  // If preserve=true, don't return it to heap nor relink in MCentral lists;
  1170  // caller takes care of it.
  1171  func mSpan_Sweep(s *mspan, preserve bool) bool {
  1172  	if checkmark {
  1173  		gothrow("MSpan_Sweep: checkmark only runs in STW and after the sweep")
  1174  	}
  1175  
  1176  	// It's critical that we enter this function with preemption disabled,
  1177  	// GC must not start while we are in the middle of this function.
  1178  	_g_ := getg()
  1179  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
  1180  		gothrow("MSpan_Sweep: m is not locked")
  1181  	}
  1182  	sweepgen := mheap_.sweepgen
  1183  	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
  1184  		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
  1185  		gothrow("MSpan_Sweep: bad span state")
  1186  	}
  1187  	arena_start := mheap_.arena_start
  1188  	cl := s.sizeclass
  1189  	size := s.elemsize
  1190  	var n int32
  1191  	var npages int32
  1192  	if cl == 0 {
  1193  		n = 1
  1194  	} else {
  1195  		// Chunk full of small blocks.
  1196  		npages = class_to_allocnpages[cl]
  1197  		n = (npages << _PageShift) / int32(size)
  1198  	}
  1199  	res := false
  1200  	nfree := 0
  1201  
  1202  	var head, end gclinkptr
  1203  
  1204  	c := _g_.m.mcache
  1205  	sweepgenset := false
  1206  
  1207  	// Mark any free objects in this span so we don't collect them.
  1208  	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
  1209  		off := (uintptr(unsafe.Pointer(link)) - arena_start) / ptrSize
  1210  		bitp := arena_start - off/wordsPerBitmapByte - 1
  1211  		shift := (off % wordsPerBitmapByte) * gcBits
  1212  		*(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
  1213  	}
  1214  
  1215  	// Unlink & free special records for any objects we're about to free.
  1216  	specialp := &s.specials
  1217  	special := *specialp
  1218  	for special != nil {
  1219  		// A finalizer can be set for an inner byte of an object, find object beginning.
  1220  		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
  1221  		off := (p - arena_start) / ptrSize
  1222  		bitp := arena_start - off/wordsPerBitmapByte - 1
  1223  		shift := (off % wordsPerBitmapByte) * gcBits
  1224  		bits := (*(*byte)(unsafe.Pointer(bitp)) >> shift) & bitMask
  1225  		if bits&bitMarked == 0 {
  1226  			// Find the exact byte for which the special was setup
  1227  			// (as opposed to object beginning).
  1228  			p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
  1229  			// about to free object: splice out special record
  1230  			y := special
  1231  			special = special.next
  1232  			*specialp = special
  1233  			if !freespecial(y, unsafe.Pointer(p), size, false) {
  1234  				// stop freeing of object if it has a finalizer
  1235  				*(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
  1236  			}
  1237  		} else {
  1238  			// object is still live: keep special record
  1239  			specialp = &special.next
  1240  			special = *specialp
  1241  		}
  1242  	}
  1243  
  1244  	// Sweep through n objects of given size starting at p.
  1245  	// This thread owns the span now, so it can manipulate
  1246  	// the block bitmap without atomic operations.
  1247  	p := uintptr(s.start << _PageShift)
  1248  	off := (p - arena_start) / ptrSize
  1249  	bitp := arena_start - off/wordsPerBitmapByte - 1
  1250  	shift := uint(0)
  1251  	step := size / (ptrSize * wordsPerBitmapByte)
  1252  	// Rewind to the previous quadruple as we move to the next one
  1253  	// at the beginning of the loop.
  1254  	bitp += step
  1255  	if step == 0 {
  1256  		// 8-byte objects.
  1257  		bitp++
  1258  		shift = gcBits
  1259  	}
  1260  	for ; n > 0; n, p = n-1, p+size {
  1261  		bitp -= step
  1262  		if step == 0 {
  1263  			if shift != 0 {
  1264  				bitp--
  1265  			}
  1266  			shift = gcBits - shift
  1267  		}
  1268  
  1269  		xbits := *(*byte)(unsafe.Pointer(bitp))
  1270  		bits := (xbits >> shift) & bitMask
  1271  
  1272  		// Allocated and marked object, reset bits to allocated.
  1273  		if bits&bitMarked != 0 {
  1274  			*(*byte)(unsafe.Pointer(bitp)) &^= bitMarked << shift
  1275  			continue
  1276  		}
  1277  
  1278  		// At this point we know that we are looking at a garbage object
  1279  		// that needs to be collected.
  1280  		if debug.allocfreetrace != 0 {
  1281  			tracefree(unsafe.Pointer(p), size)
  1282  		}
  1283  
  1284  		// Reset to allocated+noscan.
  1285  		*(*byte)(unsafe.Pointer(bitp)) = uint8(uintptr(xbits&^((bitMarked|bitsMask<<2)<<shift)) | uintptr(bitsDead)<<(shift+2))
  1286  		if cl == 0 {
  1287  			// Free large span.
  1288  			if preserve {
  1289  				gothrow("can't preserve large span")
  1290  			}
  1291  			unmarkspan(p, s.npages<<_PageShift)
  1292  			s.needzero = 1
  1293  
  1294  			// important to set sweepgen before returning it to heap
  1295  			atomicstore(&s.sweepgen, sweepgen)
  1296  			sweepgenset = true
  1297  
  1298  			// NOTE(rsc,dvyukov): The original implementation of efence
  1299  			// in CL 22060046 used SysFree instead of SysFault, so that
  1300  			// the operating system would eventually give the memory
  1301  			// back to us again, so that an efence program could run
  1302  			// longer without running out of memory. Unfortunately,
  1303  			// calling SysFree here without any kind of adjustment of the
  1304  			// heap data structures means that when the memory does
  1305  			// come back to us, we have the wrong metadata for it, either in
  1306  			// the MSpan structures or in the garbage collection bitmap.
  1307  			// Using SysFault here means that the program will run out of
  1308  			// memory fairly quickly in efence mode, but at least it won't
  1309  			// have mysterious crashes due to confused memory reuse.
  1310  			// It should be possible to switch back to SysFree if we also
  1311  			// implement and then call some kind of MHeap_DeleteSpan.
  1312  			if debug.efence > 0 {
  1313  				s.limit = 0 // prevent mlookup from finding this span
  1314  				sysFault(unsafe.Pointer(p), size)
  1315  			} else {
  1316  				mHeap_Free(&mheap_, s, 1)
  1317  			}
  1318  			c.local_nlargefree++
  1319  			c.local_largefree += size
  1320  			xadd64(&memstats.next_gc, -int64(size)*int64(gcpercent+100)/100)
  1321  			res = true
  1322  		} else {
  1323  			// Free small object.
  1324  			if size > 2*ptrSize {
  1325  				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
  1326  			} else if size > ptrSize {
  1327  				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
  1328  			}
  1329  			if head.ptr() == nil {
  1330  				head = gclinkptr(p)
  1331  			} else {
  1332  				end.ptr().next = gclinkptr(p)
  1333  			}
  1334  			end = gclinkptr(p)
  1335  			end.ptr().next = gclinkptr(0x0bade5)
  1336  			nfree++
  1337  		}
  1338  	}
  1339  
  1340  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
  1341  	// because of the potential for a concurrent free/SetFinalizer.
  1342  	// But we need to set it before we make the span available for allocation
  1343  	// (return it to heap or mcentral), because allocation code assumes that a
  1344  	// span is already swept if available for allocation.
  1345  	if !sweepgenset && nfree == 0 {
  1346  		// The span must be in our exclusive ownership until we update sweepgen;
  1347  		// check for potential races.
  1348  		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
  1349  			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
  1350  			gothrow("MSpan_Sweep: bad span state after sweep")
  1351  		}
  1352  		atomicstore(&s.sweepgen, sweepgen)
  1353  	}
  1354  	if nfree > 0 {
  1355  		c.local_nsmallfree[cl] += uintptr(nfree)
  1356  		c.local_cachealloc -= intptr(uintptr(nfree) * size)
  1357  		xadd64(&memstats.next_gc, -int64(nfree)*int64(size)*int64(gcpercent+100)/100)
  1358  		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
  1359  		// MCentral_FreeSpan updates sweepgen
  1360  	}
  1361  	return res
  1362  }
  1363  
  1364  // State of background sweep.
  1365  // Protected by gclock.
  1366  type sweepdata struct {
  1367  	g       *g
  1368  	parked  bool
  1369  	started bool
  1370  
  1371  	spanidx uint32 // background sweeper position
  1372  
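        	// Sweep counters reported by the gctrace line in gc():
        	// nbgsweep counts spans swept by the background sweeper and
        	// npausesweep counts spans swept while the world is stopped
        	// (a descriptive note, inferred from how they are used below).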
  1373  	nbgsweep    uint32
  1374  	npausesweep uint32
  1375  }
  1376  
  1377  var sweep sweepdata
  1378  
  1379  // sweepone sweeps one span and returns the number of pages returned to the heap,
  1380  // or ^uintptr(0) if there is nothing to sweep.
  1381  func sweepone() uintptr {
  1382  	_g_ := getg()
  1383  
  1384  	// Increment locks to ensure that the goroutine is not preempted
  1385  	// in the middle of a sweep, which would leave the span in an inconsistent state for the next GC.
  1386  	_g_.m.locks++
  1387  	sg := mheap_.sweepgen
  1388  	for {
  1389  		idx := xadd(&sweep.spanidx, 1) - 1
  1390  		if idx >= uint32(len(work.spans)) {
  1391  			mheap_.sweepdone = 1
  1392  			_g_.m.locks--
  1393  			return ^uintptr(0)
  1394  		}
  1395  		s := work.spans[idx]
  1396  		if s.state != mSpanInUse {
  1397  			s.sweepgen = sg
  1398  			continue
  1399  		}
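        		// Descriptive note on the sweepgen protocol, consistent with its use
        		// here and in mSpan_Sweep: h.sweepgen is incremented by 2 at the start
        		// of each sweep cycle (see gc below), so for a span s,
        		//	s.sweepgen == sg-2: the span needs sweeping
        		//	s.sweepgen == sg-1: the span is being swept
        		//	s.sweepgen == sg:   the span has been swept and is ready for use
        		// The cas below claims the span by moving it from sg-2 to sg-1.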
  1400  		if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
  1401  			continue
  1402  		}
  1403  		npages := s.npages
  1404  		if !mSpan_Sweep(s, false) {
  1405  			npages = 0
  1406  		}
  1407  		_g_.m.locks--
  1408  		return npages
  1409  	}
  1410  }
  1411  
  1412  func gosweepone() uintptr {
  1413  	var ret uintptr
  1414  	systemstack(func() {
  1415  		ret = sweepone()
  1416  	})
  1417  	return ret
  1418  }
  1419  
  1420  func gosweepdone() bool {
  1421  	return mheap_.sweepdone != 0
  1422  }
  1423  
  1424  func gchelper() {
  1425  	_g_ := getg()
  1426  	_g_.m.traceback = 2
  1427  	gchelperstart()
  1428  
  1429  	// parallel mark over GC roots
  1430  	parfordo(work.markfor)
  1431  	if gcphase != _GCscan {
  1432  		scanblock(0, 0, nil) // blocks in getfull
  1433  	}
  1434  
  1435  	nproc := work.nproc // work.nproc can change right after we increment work.ndone
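        	// Each helper adds one to work.ndone; the helper that brings the count to
        	// nproc-1 is the last one and wakes work.alldone, on which gc() sleeps
        	// when work.nproc > 1 (descriptive note).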
  1436  	if xadd(&work.ndone, +1) == nproc-1 {
  1437  		notewakeup(&work.alldone)
  1438  	}
  1439  	_g_.m.traceback = 0
  1440  }
  1441  
  1442  func cachestats() {
  1443  	for i := 0; ; i++ {
  1444  		p := allp[i]
  1445  		if p == nil {
  1446  			break
  1447  		}
  1448  		c := p.mcache
  1449  		if c == nil {
  1450  			continue
  1451  		}
  1452  		purgecachedstats(c)
  1453  	}
  1454  }
  1455  
  1456  func flushallmcaches() {
  1457  	for i := 0; ; i++ {
  1458  		p := allp[i]
  1459  		if p == nil {
  1460  			break
  1461  		}
  1462  		c := p.mcache
  1463  		if c == nil {
  1464  			continue
  1465  		}
  1466  		mCache_ReleaseAll(c)
  1467  		stackcache_clear(c)
  1468  	}
  1469  }
  1470  
  1471  func updatememstats(stats *gcstats) {
  1472  	if stats != nil {
  1473  		*stats = gcstats{}
  1474  	}
  1475  	for mp := allm; mp != nil; mp = mp.alllink {
  1476  		if stats != nil {
  1477  			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
  1478  			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
  1479  			for i, v := range src {
  1480  				dst[i] += v
  1481  			}
  1482  			mp.gcstats = gcstats{}
  1483  		}
  1484  	}
  1485  
  1486  	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
  1487  	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
  1488  	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
  1489  		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
  1490  
  1491  	// Calculate memory allocator stats.
  1492  	// During program execution we only count number of frees and amount of freed memory.
  1493  // Current number of alive objects in the heap and amount of alive heap memory
  1494  	// are calculated by scanning all spans.
  1495  	// Total number of mallocs is calculated as number of frees plus number of alive objects.
  1496  	// Similarly, total amount of allocated memory is calculated as amount of freed memory
  1497  	// plus amount of alive heap memory.
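        	// Illustrative arithmetic only: if sweeping has recorded 10 frees totaling
        	// 1KB and the span scan below finds 5 live objects totaling 600B, then
        	// nmalloc ends up as 10+5 = 15 and total_alloc as 1KB+600B.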
  1498  	memstats.alloc = 0
  1499  	memstats.total_alloc = 0
  1500  	memstats.nmalloc = 0
  1501  	memstats.nfree = 0
  1502  	for i := 0; i < len(memstats.by_size); i++ {
  1503  		memstats.by_size[i].nmalloc = 0
  1504  		memstats.by_size[i].nfree = 0
  1505  	}
  1506  
  1507  	// Flush MCache's to MCentral.
  1508  	systemstack(flushallmcaches)
  1509  
  1510  	// Aggregate local stats.
  1511  	cachestats()
  1512  
  1513  	// Scan all spans and count number of alive objects.
  1514  	lock(&mheap_.lock)
  1515  	for i := uint32(0); i < mheap_.nspan; i++ {
  1516  		s := h_allspans[i]
  1517  		if s.state != mSpanInUse {
  1518  			continue
  1519  		}
  1520  		if s.sizeclass == 0 {
  1521  			memstats.nmalloc++
  1522  			memstats.alloc += uint64(s.elemsize)
  1523  		} else {
  1524  			memstats.nmalloc += uint64(s.ref)
  1525  			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
  1526  			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
  1527  		}
  1528  	}
  1529  	unlock(&mheap_.lock)
  1530  
  1531  	// Aggregate by size class.
  1532  	smallfree := uint64(0)
  1533  	memstats.nfree = mheap_.nlargefree
  1534  	for i := 0; i < len(memstats.by_size); i++ {
  1535  		memstats.nfree += mheap_.nsmallfree[i]
  1536  		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
  1537  		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
  1538  		smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
  1539  	}
  1540  	memstats.nfree += memstats.tinyallocs
  1541  	memstats.nmalloc += memstats.nfree
  1542  
  1543  	// Calculate derived stats.
  1544  	memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
  1545  	memstats.heap_alloc = memstats.alloc
  1546  	memstats.heap_objects = memstats.nmalloc - memstats.nfree
  1547  }
  1548  
  1549  func gcinit() {
  1550  	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
  1551  		gothrow("runtime: size of Workbuf is suboptimal")
  1552  	}
  1553  
  1554  	work.markfor = parforalloc(_MaxGcproc)
  1555  	gcpercent = readgogc()
  1556  	gcdatamask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcdata)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
  1557  	gcbssmask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcbss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
  1558  }
  1559  
  1560  // Called from malloc.go using onM; stopping and starting the world is handled in the caller.
  1561  func gc_m(start_time int64, eagersweep bool) {
  1562  	_g_ := getg()
  1563  	gp := _g_.m.curg
  1564  	casgstatus(gp, _Grunning, _Gwaiting)
  1565  	gp.waitreason = "garbage collection"
  1566  
  1567  	gc(start_time, eagersweep)
  1568  	casgstatus(gp, _Gwaiting, _Grunning)
  1569  }
  1570  
  1571  // Similar to clearcheckmarkbits but works on a single span.
  1572  // It performs two tasks.
  1573  // 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
  1574  //    for nibbles with the BoundaryBit set.
  1575  // 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer 10 and
  1576  //    BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
  1577  // For the second case it is possible to restore the BitsDead pattern, but since
  1578  // checkmark is a debug tool, performance has a lower priority than simplicity.
  1579  // The span is MSpanInUse and the world is stopped.
  1580  func clearcheckmarkbitsspan(s *mspan) {
  1581  	if s.state != _MSpanInUse {
  1582  		print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
  1583  		gothrow("clearcheckmarkbitsspan: bad span state")
  1584  	}
  1585  
  1586  	arena_start := mheap_.arena_start
  1587  	cl := s.sizeclass
  1588  	size := s.elemsize
  1589  	var n int32
  1590  	if cl == 0 {
  1591  		n = 1
  1592  	} else {
  1593  		// Chunk full of small blocks
  1594  		npages := class_to_allocnpages[cl]
  1595  		n = npages << _PageShift / int32(size)
  1596  	}
  1597  
  1598  	// MSpan_Sweep has similar code but instead of overloading and
  1599  	// complicating that routine we do a simpler walk here.
  1600  	// Sweep through n objects of given size starting at p.
  1601  	// This thread owns the span now, so it can manipulate
  1602  	// the block bitmap without atomic operations.
  1603  	p := uintptr(s.start) << _PageShift
  1604  
  1605  	// Find bits for the beginning of the span.
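        	// (The heap bitmap sits just below arena_start and grows toward lower
        	// addresses: each bitmap byte describes wordsPerBitmapByte heap words, so
        	// word offset off maps to the byte at arena_start - off/wordsPerBitmapByte - 1.
        	// Descriptive note, matching the addressing used throughout this file.)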
  1606  	off := (p - arena_start) / ptrSize
  1607  	bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  1608  	step := size / (ptrSize * wordsPerBitmapByte)
  1609  
  1610  	// The type bit values are:
  1611  	//	00 - BitsDead, for us BitsScalarMarked
  1612  	//	01 - BitsScalar
  1613  	//	10 - BitsPointer
  1614  	//	11 - unused, for us BitsPointerMarked
  1615  	//
  1616  	// When called to prepare for the checkmark phase (checkmark==1),
  1617  	// we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
  1618  	// type bits anywhere.
  1619  	//
  1620  	// The checkmark phase marks by changing BitsScalar to BitsScalarMarked
  1621  	// and BitsPointer to BitsPointerMarked.
  1622  	//
  1623  	// When called to clean up after the checkmark phase (checkmark==0),
  1624  	// we unmark by changing BitsScalarMarked back to BitsScalar and
  1625  	// BitsPointerMarked back to BitsPointer.
  1626  	//
  1627  	// There are two problems with the scheme as just described.
  1628  	// First, the setup rewrites BitsDead to BitsScalar, but the type bits
  1629  	// following a BitsDead are uninitialized and must not be used.
  1630  	// Second, objects that are free are expected to have their type
  1631  	// bits zeroed (BitsDead), so in the cleanup we need to restore
  1632  	// any BitsDeads that were there originally.
  1633  	//
  1634  	// In a one-word object (8-byte allocation on 64-bit system),
  1635  	// there is no difference between BitsScalar and BitsDead, because
  1636  	// neither is a pointer and there are no more words in the object,
  1637  	// so using BitsScalar during the checkmark is safe and mapping
  1638  	// both back to BitsDead during cleanup is also safe.
  1639  	//
  1640  	// In a larger object, we need to be more careful. During setup,
  1641  	// if the type of the first word is BitsDead, we change it to BitsScalar
  1642  	// (as we must) but also initialize the type of the second
  1643  	// word to BitsDead, so that a scan during the checkmark phase
  1644  	// will still stop before seeing the uninitialized type bits in the
  1645  	// rest of the object. The sequence 'BitsScalar BitsDead' never
  1646  	// happens in real type bitmaps - BitsDead is always as early
  1647  	// as possible, so immediately after the last BitsPointer.
  1648  	// During cleanup, if we see a BitsScalar, we can check to see if it
  1649  	// is followed by BitsDead. If so, it was originally BitsDead and
  1650  	// we can change it back.
  1651  
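        	// Worked example of the rewriting (illustrative, using the encoding listed
        	// above): a boundary nibble with type bits 00 (BitsDead) becomes 01
        	// (BitsScalar) during setup; the checkmark phase may then flip it to 00
        	// (BitsScalarMarked), or flip 10 (BitsPointer) to 11 (BitsPointerMarked);
        	// cleanup XORs the marked encodings with _BitsCheckMarkXor to restore
        	// 01 and 10.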
  1652  	if step == 0 {
  1653  		// updating top and bottom nibbles, all boundaries
  1654  		for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
  1655  			if *bitp&bitBoundary == 0 {
  1656  				gothrow("missing bitBoundary")
  1657  			}
  1658  			b := (*bitp & bitPtrMask) >> 2
  1659  			if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
  1660  				*bitp &^= 0x0c // convert to _BitsDead
  1661  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1662  				*bitp &^= _BitsCheckMarkXor << 2
  1663  			}
  1664  
  1665  			if (*bitp>>gcBits)&bitBoundary == 0 {
  1666  				gothrow("missing bitBoundary")
  1667  			}
  1668  			b = ((*bitp >> gcBits) & bitPtrMask) >> 2
  1669  			if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
  1670  				*bitp &^= 0xc0 // convert to _BitsDead
  1671  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1672  				*bitp &^= _BitsCheckMarkXor << (2 + gcBits)
  1673  			}
  1674  		}
  1675  	} else {
  1676  		// updating bottom nibble for first word of each object
  1677  		for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
  1678  			if *bitp&bitBoundary == 0 {
  1679  				gothrow("missing bitBoundary")
  1680  			}
  1681  			b := (*bitp & bitPtrMask) >> 2
  1682  
  1683  			if checkmark && b == _BitsDead {
  1684  				// move BitsDead into second word.
  1685  				// set bits to BitsScalar in preparation for checkmark phase.
  1686  				*bitp &^= 0xc0
  1687  				*bitp |= _BitsScalar << 2
  1688  			} else if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) && *bitp&0xc0 == 0 {
  1689  				// Cleaning up after checkmark phase.
  1690  				// First word is scalar or dead (we can no longer tell which)
  1691  				// and second word is dead.
  1692  				// First word might as well be dead too.
  1693  				*bitp &^= 0x0c
  1694  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1695  				*bitp ^= _BitsCheckMarkXor << 2
  1696  			}
  1697  		}
  1698  	}
  1699  }
  1700  
  1701  // clearcheckmarkbits performs two tasks.
  1702  // 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
  1703  //    for nibbles with the BoundaryBit set.
  1704  // 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer 10 and
  1705  //    BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
  1706  // This is a bit expensive but preserves the BitsDead encoding during the normal marking.
  1707  // BitsDead remains valid for every nibble except the ones with BitsBoundary set.
  1708  func clearcheckmarkbits() {
  1709  	for _, s := range work.spans {
  1710  		if s.state == _MSpanInUse {
  1711  			clearcheckmarkbitsspan(s)
  1712  		}
  1713  	}
  1714  }
  1715  
  1716  // Called from malloc.go using onM.
  1717  // The world is stopped. Rerun the scan and mark phases
  1718  // using the bitMarkedCheck bit instead of the
  1719  // bitMarked bit. If the marking encounters a
  1720  // bitMarked bit that is not set then we throw.
  1721  func gccheckmark_m(startTime int64, eagersweep bool) {
  1722  	if !gccheckmarkenable {
  1723  		return
  1724  	}
  1725  
  1726  	if checkmark {
  1727  		gothrow("gccheckmark_m, entered with checkmark already true")
  1728  	}
  1729  
  1730  	checkmark = true
  1731  	clearcheckmarkbits()        // Converts BitsDead to BitsScalar.
  1732  	gc_m(startTime, eagersweep) // turns off checkmark
  1733  	// Work done; fix up the GC bitmap to remove the checkmark bits.
  1734  	clearcheckmarkbits()
  1735  }
  1736  
  1737  func gccheckmarkenable_m() {
  1738  	gccheckmarkenable = true
  1739  }
  1740  
  1741  func gccheckmarkdisable_m() {
  1742  	gccheckmarkenable = false
  1743  }
  1744  
  1745  func finishsweep_m() {
  1746  	// The world is stopped so we should be able to complete the sweeps
  1747  	// quickly.
  1748  	for sweepone() != ^uintptr(0) {
  1749  		sweep.npausesweep++
  1750  	}
  1751  
  1752  	// There may be some other spans being swept concurrently that
  1753  	// we need to wait for. If finishsweep_m is done with the world stopped,
  1754  	// this code is not required.
  1755  	sg := mheap_.sweepgen
  1756  	for _, s := range work.spans {
  1757  		if s.sweepgen != sg && s.state == _MSpanInUse {
  1758  			mSpan_EnsureSwept(s)
  1759  		}
  1760  	}
  1761  }
  1762  
  1763  // Scan all of the stacks, greying (or graying if in America) the referents
  1764  // but not blackening them since the mark write barrier isn't installed.
  1765  func gcscan_m() {
  1766  	_g_ := getg()
  1767  
  1768  	// Grab the g that called us and potentially allow rescheduling.
  1769  	// This allows it to be scanned like other goroutines.
  1770  	mastergp := _g_.m.curg
  1771  	casgstatus(mastergp, _Grunning, _Gwaiting)
  1772  	mastergp.waitreason = "garbage collection scan"
  1773  
  1774  	// Span sweeping has been done by finishsweep_m.
  1775  	// Long term we will want to make this goroutine runnable
  1776  	// by placing it onto a scanenqueue state and then calling
  1777  	// runtime·restartg(mastergp) to make it Grunnable.
  1778  	// At the bottom we will want to return this p back to the scheduler.
  1779  	oldphase := gcphase
  1780  
  1781  	// Prepare flag indicating that the scan has not been completed.
  1782  	lock(&allglock)
  1783  	local_allglen := allglen
  1784  	for i := uintptr(0); i < local_allglen; i++ {
  1785  		gp := allgs[i]
  1786  		gp.gcworkdone = false // set to true in gcphasework
  1787  	}
  1788  	unlock(&allglock)
  1789  
  1790  	work.nwait = 0
  1791  	work.ndone = 0
  1792  	work.nproc = 1 // For now do not do this in parallel.
  1793  	gcphase = _GCscan
  1794  	//	ackgcphase is not needed since we are not scanning running goroutines.
  1795  	parforsetup(work.markfor, work.nproc, uint32(_RootCount+local_allglen), nil, false, markroot)
  1796  	parfordo(work.markfor)
  1797  
  1798  	lock(&allglock)
  1799  	// Check that gc work is done.
  1800  	for i := uintptr(0); i < local_allglen; i++ {
  1801  		gp := allgs[i]
  1802  		if !gp.gcworkdone {
  1803  			gothrow("scan missed a g")
  1804  		}
  1805  	}
  1806  	unlock(&allglock)
  1807  
  1808  	gcphase = oldphase
  1809  	casgstatus(mastergp, _Gwaiting, _Grunning)
  1810  	// Let the g that called us continue to run.
  1811  }
  1812  
  1813  // Mark all objects that are known about.
  1814  func gcmark_m() {
  1815  	scanblock(0, 0, nil)
  1816  }
  1817  
  1818  // For now this must be bracketed with a stoptheworld and a starttheworld to ensure
  1819  // all goroutines see the new barrier.
  1820  func gcinstallmarkwb_m() {
  1821  	gcphase = _GCmark
  1822  }
  1823  
  1824  // For now this must be bracketed with a stoptheworld and a starttheworld to ensure
  1825  // all goroutines see the new barrier.
  1826  func gcinstalloffwb_m() {
  1827  	gcphase = _GCoff
  1828  }
  1829  
  1830  func gc(start_time int64, eagersweep bool) {
  1831  	if _DebugGCPtrs {
  1832  		print("GC start\n")
  1833  	}
  1834  
  1835  	if debug.allocfreetrace > 0 {
  1836  		tracegc()
  1837  	}
  1838  
  1839  	_g_ := getg()
  1840  	_g_.m.traceback = 2
  1841  	t0 := start_time
  1842  	work.tstart = start_time
  1843  
  1844  	var t1 int64
  1845  	if debug.gctrace > 0 {
  1846  		t1 = nanotime()
  1847  	}
  1848  
  1849  	if !checkmark {
  1850  		finishsweep_m() // skip during checkmark debug phase.
  1851  	}
  1852  
  1853  	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
  1854  	// resizing/freeing allspans.
  1855  	// New spans can be created while GC progresses, but they are not garbage for
  1856  	// this round:
  1857  	//  - new stack spans can be created even while the world is stopped.
  1858  	//  - new malloc spans can be created during the concurrent sweep
  1859  
  1860  	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
  1861  	lock(&mheap_.lock)
  1862  	// Free the old cached sweep array if necessary.
  1863  	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
  1864  		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
  1865  	}
  1866  	// Cache the current array for marking.
  1867  	mheap_.gcspans = mheap_.allspans
  1868  	work.spans = h_allspans
  1869  	unlock(&mheap_.lock)
  1870  	oldphase := gcphase
  1871  
  1872  	work.nwait = 0
  1873  	work.ndone = 0
  1874  	work.nproc = uint32(gcprocs())
  1875  	gcphase = _GCmarktermination
  1876  
  1877  	// World is stopped so allglen will not change.
  1878  	for i := uintptr(0); i < allglen; i++ {
  1879  		gp := allgs[i]
  1880  		gp.gcworkdone = false // set to true in gcphasework
  1881  	}
  1882  
  1883  	parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), nil, false, markroot)
  1884  	if work.nproc > 1 {
  1885  		noteclear(&work.alldone)
  1886  		helpgc(int32(work.nproc))
  1887  	}
  1888  
  1889  	var t2 int64
  1890  	if debug.gctrace > 0 {
  1891  		t2 = nanotime()
  1892  	}
  1893  
  1894  	gchelperstart()
  1895  	parfordo(work.markfor)
  1896  	scanblock(0, 0, nil)
  1897  
  1898  	if work.full != 0 {
  1899  		gothrow("work.full != 0")
  1900  	}
  1901  	if work.partial != 0 {
  1902  		gothrow("work.partial != 0")
  1903  	}
  1904  
  1905  	gcphase = oldphase
  1906  	var t3 int64
  1907  	if debug.gctrace > 0 {
  1908  		t3 = nanotime()
  1909  	}
  1910  
  1911  	if work.nproc > 1 {
  1912  		notesleep(&work.alldone)
  1913  	}
  1914  
  1915  	shrinkfinish()
  1916  
  1917  	cachestats()
  1918  	// The next_gc calculation is tricky with concurrent sweep since we don't know the size of the live heap.
  1919  	// Estimate what the live heap size was after the previous GC (for printing only).
  1920  	heap0 := memstats.next_gc * 100 / (uint64(gcpercent) + 100)
  1921  	// Conservatively set next_gc to a high value assuming that everything is live;
  1922  	// concurrent/lazy sweep will reduce this number while discovering new garbage.
  1923  	memstats.next_gc = memstats.heap_alloc + memstats.heap_alloc*uint64(gcpercent)/100
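        	// Illustrative arithmetic only: with gcpercent = 100 and a 4 MB live heap,
        	// next_gc becomes 8 MB, and the heap0 estimate above inverts the same
        	// formula (8 MB * 100 / 200 = 4 MB).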
  1924  
  1925  	t4 := nanotime()
  1926  	atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
  1927  	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(t4 - t0)
  1928  	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(t4)
  1929  	memstats.pause_total_ns += uint64(t4 - t0)
  1930  	memstats.numgc++
  1931  	if memstats.debuggc {
  1932  		print("pause ", t4-t0, "\n")
  1933  	}
  1934  
  1935  	if debug.gctrace > 0 {
  1936  		heap1 := memstats.heap_alloc
  1937  		var stats gcstats
  1938  		updatememstats(&stats)
  1939  		if heap1 != memstats.heap_alloc {
  1940  			print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
  1941  			gothrow("mstats skew")
  1942  		}
  1943  		obj := memstats.nmalloc - memstats.nfree
  1944  
  1945  		stats.nprocyield += work.markfor.nprocyield
  1946  		stats.nosyield += work.markfor.nosyield
  1947  		stats.nsleep += work.markfor.nsleep
  1948  
  1949  		print("gc", memstats.numgc, "(", work.nproc, "): ",
  1950  			(t1-t0)/1000, "+", (t2-t1)/1000, "+", (t3-t2)/1000, "+", (t4-t3)/1000, " us, ",
  1951  			heap0>>20, " -> ", heap1>>20, " MB, ",
  1952  			obj, " (", memstats.nmalloc, "-", memstats.nfree, ") objects, ",
  1953  			gcount(), " goroutines, ",
  1954  			len(work.spans), "/", sweep.nbgsweep, "/", sweep.npausesweep, " sweeps, ",
  1955  			stats.nhandoff, "(", stats.nhandoffcnt, ") handoff, ",
  1956  			work.markfor.nsteal, "(", work.markfor.nstealcnt, ") steal, ",
  1957  			stats.nprocyield, "/", stats.nosyield, "/", stats.nsleep, " yields\n")
  1958  		sweep.nbgsweep = 0
  1959  		sweep.npausesweep = 0
  1960  	}
  1961  
  1962  	// See the comment at the beginning of this function as to why we need the following.
  1963  	// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
  1964  	lock(&mheap_.lock)
  1965  	// Free the old cached mark array if necessary.
  1966  	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
  1967  		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
  1968  	}
  1969  
  1970  	if gccheckmarkenable {
  1971  		if !checkmark {
  1972  			// first half of two-pass; don't set up sweep
  1973  			unlock(&mheap_.lock)
  1974  			return
  1975  		}
  1976  		checkmark = false // done checking marks
  1977  	}
  1978  
  1979  	// Cache the current array for sweeping.
  1980  	mheap_.gcspans = mheap_.allspans
  1981  	mheap_.sweepgen += 2
  1982  	mheap_.sweepdone = 0
  1983  	work.spans = h_allspans
  1984  	sweep.spanidx = 0
  1985  	unlock(&mheap_.lock)
  1986  
  1987  	if _ConcurrentSweep && !eagersweep {
  1988  		lock(&gclock)
  1989  		if !sweep.started {
  1990  			go bgsweep()
  1991  			sweep.started = true
  1992  		} else if sweep.parked {
  1993  			sweep.parked = false
  1994  			ready(sweep.g)
  1995  		}
  1996  		unlock(&gclock)
  1997  	} else {
  1998  		// Sweep all spans eagerly.
  1999  		for sweepone() != ^uintptr(0) {
  2000  			sweep.npausesweep++
  2001  		}
  2002  		// Do an additional mProf_GC, because all 'free' events are now real as well.
  2003  		mProf_GC()
  2004  	}
  2005  
  2006  	mProf_GC()
  2007  	_g_.m.traceback = 0
  2008  
  2009  	if _DebugGCPtrs {
  2010  		print("GC end\n")
  2011  	}
  2012  }
  2013  
  2014  func readmemstats_m(stats *MemStats) {
  2015  	updatememstats(nil)
  2016  
  2017  	// The size of the trailing by_size array differs between Go and C:
  2018  	// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
  2019  	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
  2020  
  2021  	// Stack numbers are part of the heap numbers; separate those out for user consumption.
  2022  	stats.StackSys = stats.StackInuse
  2023  	stats.HeapInuse -= stats.StackInuse
  2024  	stats.HeapSys -= stats.StackInuse
  2025  }
  2026  
  2027  //go:linkname readGCStats runtime/debug.readGCStats
  2028  func readGCStats(pauses *[]uint64) {
  2029  	systemstack(func() {
  2030  		readGCStats_m(pauses)
  2031  	})
  2032  }
  2033  
  2034  func readGCStats_m(pauses *[]uint64) {
  2035  	p := *pauses
  2036  	// Calling code in runtime/debug should make the slice large enough.
  2037  	if cap(p) < len(memstats.pause_ns)+3 {
  2038  		gothrow("runtime: short slice passed to readGCStats")
  2039  	}
  2040  
  2041  	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
  2042  	lock(&mheap_.lock)
  2043  
  2044  	n := memstats.numgc
  2045  	if n > uint32(len(memstats.pause_ns)) {
  2046  		n = uint32(len(memstats.pause_ns))
  2047  	}
  2048  
  2049  	// The pause buffer is circular. The most recent pause is at
  2050  	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
  2051  	// from there to go back farther in time. We deliver the times
  2052  	// most recent first (in p[0]).
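        	// For example (assuming the usual 256-entry pause buffers): with
        	// numgc == 300, the loop below copies pause_ns[43], pause_ns[42], ...
        	// into p[0], p[1], ..., since (300-1)%256 == 43 is the most recent slot.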
  2053  	p = p[:cap(p)]
  2054  	for i := uint32(0); i < n; i++ {
  2055  		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
  2056  		p[i] = memstats.pause_ns[j]
  2057  		p[n+i] = memstats.pause_end[j]
  2058  	}
  2059  
  2060  	p[n+n] = memstats.last_gc
  2061  	p[n+n+1] = uint64(memstats.numgc)
  2062  	p[n+n+2] = memstats.pause_total_ns
  2063  	unlock(&mheap_.lock)
  2064  	*pauses = p[:n+n+3]
  2065  }
  2066  
  2067  func setGCPercent(in int32) (out int32) {
  2068  	lock(&mheap_.lock)
  2069  	out = gcpercent
  2070  	if in < 0 {
  2071  		in = -1
  2072  	}
  2073  	gcpercent = in
  2074  	unlock(&mheap_.lock)
  2075  	return out
  2076  }
  2077  
  2078  func gchelperstart() {
  2079  	_g_ := getg()
  2080  
  2081  	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
  2082  		gothrow("gchelperstart: bad m->helpgc")
  2083  	}
  2084  	if _g_ != _g_.m.g0 {
  2085  		gothrow("gchelper not running on g0 stack")
  2086  	}
  2087  }
  2088  
  2089  func wakefing() *g {
  2090  	var res *g
  2091  	lock(&finlock)
  2092  	if fingwait && fingwake {
  2093  		fingwait = false
  2094  		fingwake = false
  2095  		res = fing
  2096  	}
  2097  	unlock(&finlock)
  2098  	return res
  2099  }
  2100  
  2101  func addb(p *byte, n uintptr) *byte {
  2102  	return (*byte)(add(unsafe.Pointer(p), n))
  2103  }
  2104  
  2105  // Recursively unrolls GC program in prog.
  2106  // mask is where to store the result.
  2107  // ppos is a pointer to the position in mask, in bits.
  2108  // sparse says to generate a 4-bit-per-word mask for the heap (2 bits per word for data/bss otherwise).
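        // Descriptive sketch of the program encoding as consumed below: insData is
        // followed by a one-byte count and that many 2-bit type values packed into
        // bytes; insArray is followed by a ptrSize-byte little-endian element count
        // and a nested program terminated by insArrayEnd; insEnd terminates the
        // whole program.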
  2109  func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool) *byte {
  2110  	arena_start := mheap_.arena_start
  2111  	pos := *ppos
  2112  	mask := (*[1 << 30]byte)(unsafe.Pointer(maskp))
  2113  	for {
  2114  		switch *prog {
  2115  		default:
  2116  			gothrow("unrollgcprog: unknown instruction")
  2117  
  2118  		case insData:
  2119  			prog = addb(prog, 1)
  2120  			siz := int(*prog)
  2121  			prog = addb(prog, 1)
  2122  			p := (*[1 << 30]byte)(unsafe.Pointer(prog))
  2123  			for i := 0; i < siz; i++ {
  2124  				v := p[i/_PointersPerByte]
  2125  				v >>= (uint(i) % _PointersPerByte) * _BitsPerPointer
  2126  				v &= _BitsMask
  2127  				if inplace {
  2128  					// Store directly into GC bitmap.
  2129  					off := (uintptr(unsafe.Pointer(&mask[pos])) - arena_start) / ptrSize
  2130  					bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2131  					shift := (off % wordsPerBitmapByte) * gcBits
  2132  					if shift == 0 {
  2133  						*bitp = 0
  2134  					}
  2135  					*bitp |= v << (shift + 2)
  2136  					pos += ptrSize
  2137  				} else if sparse {
  2138  					// 4-bits per word
  2139  					v <<= (pos % 8) + 2
  2140  					mask[pos/8] |= v
  2141  					pos += gcBits
  2142  				} else {
  2143  					// 2-bits per word
  2144  					v <<= pos % 8
  2145  					mask[pos/8] |= v
  2146  					pos += _BitsPerPointer
  2147  				}
  2148  			}
  2149  			prog = addb(prog, round(uintptr(siz)*_BitsPerPointer, 8)/8)
  2150  
  2151  		case insArray:
  2152  			prog = (*byte)(add(unsafe.Pointer(prog), 1))
  2153  			siz := uintptr(0)
  2154  			for i := uintptr(0); i < ptrSize; i++ {
  2155  				siz = (siz << 8) + uintptr(*(*byte)(add(unsafe.Pointer(prog), ptrSize-i-1)))
  2156  			}
  2157  			prog = (*byte)(add(unsafe.Pointer(prog), ptrSize))
  2158  			var prog1 *byte
  2159  			for i := uintptr(0); i < siz; i++ {
  2160  				prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
  2161  			}
  2162  			if *prog1 != insArrayEnd {
  2163  				gothrow("unrollgcprog: array does not end with insArrayEnd")
  2164  			}
  2165  			prog = (*byte)(add(unsafe.Pointer(prog1), 1))
  2166  
  2167  		case insArrayEnd, insEnd:
  2168  			*ppos = pos
  2169  			return prog
  2170  		}
  2171  	}
  2172  }
  2173  
  2174  // Unrolls GC program prog for data/bss, returns dense GC mask.
  2175  func unrollglobgcprog(prog *byte, size uintptr) bitvector {
  2176  	masksize := round(round(size, ptrSize)/ptrSize*bitsPerPointer, 8) / 8
  2177  	mask := (*[1 << 30]byte)(persistentalloc(masksize+1, 0, &memstats.gc_sys))
  2178  	mask[masksize] = 0xa1
  2179  	pos := uintptr(0)
  2180  	prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
  2181  	if pos != size/ptrSize*bitsPerPointer {
  2182  		print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
  2183  		gothrow("unrollglobgcprog: bad program size")
  2184  	}
  2185  	if *prog != insEnd {
  2186  		gothrow("unrollglobgcprog: program does not end with insEnd")
  2187  	}
  2188  	if mask[masksize] != 0xa1 {
  2189  		gothrow("unrollglobgcprog: overflow")
  2190  	}
  2191  	return bitvector{int32(masksize * 8), &mask[0]}
  2192  }
  2193  
  2194  func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
  2195  	pos := uintptr(0)
  2196  	prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2197  	for pos != size0 {
  2198  		unrollgcprog1((*byte)(v), prog, &pos, true, true)
  2199  	}
  2200  
  2201  	// Mark first word as bitBoundary.
  2202  	arena_start := mheap_.arena_start
  2203  	off := (uintptr(v) - arena_start) / ptrSize
  2204  	bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2205  	shift := (off % wordsPerBitmapByte) * gcBits
  2206  	*bitp |= bitBoundary << shift
  2207  
  2208  	// Mark word after last as BitsDead.
  2209  	if size0 < size {
  2210  		off := (uintptr(v) + size0 - arena_start) / ptrSize
  2211  		bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2212  		shift := (off % wordsPerBitmapByte) * gcBits
  2213  		*bitp &= uint8(^(bitPtrMask << shift) | uintptr(bitsDead)<<(shift+2))
  2214  	}
  2215  }
  2216  
  2217  var unroll mutex
  2218  
  2219  // Unrolls GC program in typ.gc[1] into typ.gc[0]
  2220  func unrollgcprog_m(typ *_type) {
  2221  	lock(&unroll)
  2222  	mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0])))
  2223  	if *mask == 0 {
  2224  		pos := uintptr(8) // skip the unroll flag
  2225  		prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2226  		prog = unrollgcprog1(mask, prog, &pos, false, true)
  2227  		if *prog != insEnd {
  2228  			gothrow("unrollgcprog: program does not end with insEnd")
  2229  		}
  2230  		if typ.size/ptrSize%2 != 0 {
  2231  			// repeat the program
  2232  			prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2233  			unrollgcprog1(mask, prog, &pos, false, true)
  2234  		}
  2235  
  2236  		// atomic way to say mask[0] = 1
  2237  		atomicor8(mask, 1)
  2238  	}
  2239  	unlock(&unroll)
  2240  }
  2241  
  2242  // markspan marks the span of memory at v as having n blocks of the given size.
  2243  // If leftover is true, there is leftover space at the end of the span.
  2244  func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
  2245  	if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
  2246  		gothrow("markspan: bad pointer")
  2247  	}
  2248  
  2249  	// Find bits of the beginning of the span.
  2250  	off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
  2251  	if off%wordsPerBitmapByte != 0 {
  2252  		gothrow("markspan: unaligned length")
  2253  	}
  2254  	b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2255  
  2256  	// Okay to use non-atomic ops here, because we control
  2257  	// the entire span, and each bitmap byte has bits for only
  2258  	// one span, so no other goroutines are changing these bitmap words.
  2259  
  2260  	if size == ptrSize {
  2261  		// Possible only on 64-bits (minimal size class is 8 bytes).
  2262  		// Set memory to 0x11.
  2263  		if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
  2264  			gothrow("markspan: bad bits")
  2265  		}
  2266  		if n%(wordsPerBitmapByte*ptrSize) != 0 {
  2267  			gothrow("markspan: unaligned length")
  2268  		}
  2269  		b = b - n/wordsPerBitmapByte + 1 // find first byte
  2270  		if b%ptrSize != 0 {
  2271  			gothrow("markspan: unaligned pointer")
  2272  		}
  2273  		for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
  2274  			*(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
  2275  		}
  2276  		return
  2277  	}
  2278  
  2279  	if leftover {
  2280  		n++ // mark a boundary just past end of last block too
  2281  	}
  2282  	step := size / (ptrSize * wordsPerBitmapByte)
  2283  	for i := uintptr(0); i < n; i, b = i+1, b-step {
  2284  		*(*byte)(unsafe.Pointer(b)) = bitBoundary | bitsDead<<2
  2285  	}
  2286  }
  2287  
  2288  // unmarkspan unmarks the span of memory at v of length n bytes.
  2289  func unmarkspan(v, n uintptr) {
  2290  	if v+n > mheap_.arena_used || v < mheap_.arena_start {
  2291  		gothrow("markspan: bad pointer")
  2292  	}
  2293  
  2294  	off := (v - mheap_.arena_start) / ptrSize // word offset
  2295  	if off%(ptrSize*wordsPerBitmapByte) != 0 {
  2296  		gothrow("markspan: unaligned pointer")
  2297  	}
  2298  
  2299  	b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2300  	n /= ptrSize
  2301  	if n%(ptrSize*wordsPerBitmapByte) != 0 {
  2302  		gothrow("unmarkspan: unaligned length")
  2303  	}
  2304  
  2305  	// Okay to use non-atomic ops here, because we control
  2306  	// the entire span, and each bitmap word has bits for only
  2307  	// one span, so no other goroutines are changing these
  2308  	// bitmap words.
  2309  	n /= wordsPerBitmapByte
  2310  	memclr(unsafe.Pointer(b-n+1), n)
  2311  }
  2312  
  2313  func mHeap_MapBits(h *mheap) {
  2314  	// Caller has added extra mappings to the arena.
  2315  	// Add extra mappings of bitmap words as needed.
  2316  	// We allocate extra bitmap pieces in chunks of bitmapChunk.
  2317  	const bitmapChunk = 8192
  2318  
  2319  	n := (h.arena_used - h.arena_start) / (ptrSize * wordsPerBitmapByte)
  2320  	n = round(n, bitmapChunk)
  2321  	n = round(n, _PhysPageSize)
  2322  	if h.bitmap_mapped >= n {
  2323  		return
  2324  	}
  2325  
  2326  	sysMap(unsafe.Pointer(h.arena_start-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
  2327  	h.bitmap_mapped = n
  2328  }
  2329  
  2330  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  2331  	target := (*stkframe)(ctxt)
  2332  	if frame.sp <= target.sp && target.sp < frame.varp {
  2333  		*target = *frame
  2334  		return false
  2335  	}
  2336  	return true
  2337  }
  2338  
  2339  // Returns GC type info for object p for testing.
  2340  func getgcmask(p unsafe.Pointer, t *_type, mask **byte, len *uintptr) {
  2341  	*mask = nil
  2342  	*len = 0
  2343  
  2344  	// data
  2345  	if uintptr(unsafe.Pointer(&data)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&edata)) {
  2346  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2347  		*len = n / ptrSize
  2348  		*mask = &make([]byte, *len)[0]
  2349  		for i := uintptr(0); i < n; i += ptrSize {
  2350  			off := (uintptr(p) + i - uintptr(unsafe.Pointer(&data))) / ptrSize
  2351  			bits := (*(*byte)(add(unsafe.Pointer(gcdatamask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
  2352  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2353  		}
  2354  		return
  2355  	}
  2356  
  2357  	// bss
  2358  	if uintptr(unsafe.Pointer(&bss)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&ebss)) {
  2359  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2360  		*len = n / ptrSize
  2361  		*mask = &make([]byte, *len)[0]
  2362  		for i := uintptr(0); i < n; i += ptrSize {
  2363  			off := (uintptr(p) + i - uintptr(unsafe.Pointer(&bss))) / ptrSize
  2364  			bits := (*(*byte)(add(unsafe.Pointer(gcbssmask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
  2365  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2366  		}
  2367  		return
  2368  	}
  2369  
  2370  	// heap
  2371  	var n uintptr
  2372  	var base uintptr
  2373  	if mlookup(uintptr(p), &base, &n, nil) != 0 {
  2374  		*len = n / ptrSize
  2375  		*mask = &make([]byte, *len)[0]
  2376  		for i := uintptr(0); i < n; i += ptrSize {
  2377  			off := (uintptr(base) + i - mheap_.arena_start) / ptrSize
  2378  			b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2379  			shift := (off % wordsPerBitmapByte) * gcBits
  2380  			bits := (*(*byte)(unsafe.Pointer(b)) >> (shift + 2)) & bitsMask
  2381  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2382  		}
  2383  		return
  2384  	}
  2385  
  2386  	// stack
  2387  	var frame stkframe
  2388  	frame.sp = uintptr(p)
  2389  	_g_ := getg()
  2390  	gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  2391  	if frame.fn != nil {
  2392  		f := frame.fn
  2393  		targetpc := frame.continpc
  2394  		if targetpc == 0 {
  2395  			return
  2396  		}
  2397  		if targetpc != f.entry {
  2398  			targetpc--
  2399  		}
  2400  		pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
  2401  		if pcdata == -1 {
  2402  			return
  2403  		}
  2404  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  2405  		if stkmap == nil || stkmap.n <= 0 {
  2406  			return
  2407  		}
  2408  		bv := stackmapdata(stkmap, pcdata)
  2409  		size := uintptr(bv.n) / bitsPerPointer * ptrSize
  2410  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2411  		*len = n / ptrSize
  2412  		*mask = &make([]byte, *len)[0]
  2413  		for i := uintptr(0); i < n; i += ptrSize {
  2414  			off := (uintptr(p) + i - frame.varp + size) / ptrSize
  2415  			bits := ((*(*byte)(add(unsafe.Pointer(bv.bytedata), off*bitsPerPointer/8))) >> ((off * bitsPerPointer) % 8)) & bitsMask
  2416  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2417  		}
  2418  	}
  2419  }
  2420  
  2421  func unixnanotime() int64 {
  2422  	var now int64
  2423  	gc_unixnanotime(&now)
  2424  	return now
  2425  }