github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  var Fadd64 = fadd64
    18  var Fsub64 = fsub64
    19  var Fmul64 = fmul64
    20  var Fdiv64 = fdiv64
    21  var F64to32 = f64to32
    22  var F32to64 = f32to64
    23  var Fcmp64 = fcmp64
    24  var Fintto64 = fintto64
    25  var F64toint = f64toint
    26  
    27  var Entersyscall = entersyscall
    28  var Exitsyscall = exitsyscall
    29  var LockedOSThread = lockedOSThread
    30  var Xadduintptr = atomic.Xadduintptr
    31  
    32  var Fastlog2 = fastlog2
    33  
    34  var Atoi = atoi
    35  var Atoi32 = atoi32
    36  var ParseByteCount = parseByteCount
    37  
    38  var Nanotime = nanotime
    39  var NetpollBreak = netpollBreak
    40  var Usleep = usleep
    41  
    42  var PhysPageSize = physPageSize
    43  var PhysHugePageSize = physHugePageSize
    44  
    45  var NetpollGenericInit = netpollGenericInit
    46  
    47  var Memmove = memmove
    48  var MemclrNoHeapPointers = memclrNoHeapPointers
    49  
    50  var LockPartialOrder = lockPartialOrder
    51  
    52  type LockRank lockRank
    53  
    54  func (l LockRank) String() string {
    55  	return lockRank(l).String()
    56  }
    57  
    58  const PreemptMSupported = preemptMSupported
    59  
    60  type LFNode struct {
    61  	Next    uint64
    62  	Pushcnt uintptr
    63  }
    64  
    65  func LFStackPush(head *uint64, node *LFNode) {
    66  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    67  }
    68  
    69  func LFStackPop(head *uint64) *LFNode {
    70  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
    71  }
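
        // A minimal usage sketch (hypothetical; the real tests live in package
        // runtime_test, which reaches these exports as runtime.LFStackPush and
        // runtime.LFStackPop):
        //
        //	var head uint64
        //	node := &runtime.LFNode{Pushcnt: 1}
        //	runtime.LFStackPush(&head, node)
        //	if runtime.LFStackPop(&head) != node {
        //		t.Fatal("pop returned a different node")
        //	}
        //
        // Note that lfstack stores raw pointers, so real tests must also make
        // sure the nodes cannot move while they are on the stack.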
    72  
    73  func Netpoll(delta int64) {
    74  	systemstack(func() {
    75  		netpoll(delta)
    76  	})
    77  }
    78  
    79  func GCMask(x any) (ret []byte) {
    80  	systemstack(func() {
    81  		ret = getgcmask(x)
    82  	})
    83  	return
    84  }
    85  
    86  func RunSchedLocalQueueTest() {
    87  	pp := new(p)
    88  	gs := make([]g, len(pp.runq))
    89  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
    90  	for i := 0; i < len(pp.runq); i++ {
    91  		if g, _ := runqget(pp); g != nil {
    92  			throw("runq is not empty initially")
    93  		}
    94  		for j := 0; j < i; j++ {
    95  			runqput(pp, &gs[i], false)
    96  		}
    97  		for j := 0; j < i; j++ {
    98  			if g, _ := runqget(pp); g != &gs[i] {
    99  				print("bad element at iter ", i, "/", j, "\n")
   100  				throw("bad element")
   101  			}
   102  		}
   103  		if g, _ := runqget(pp); g != nil {
   104  			throw("runq is not empty afterwards")
   105  		}
   106  	}
   107  }
   108  
   109  func RunSchedLocalQueueStealTest() {
   110  	p1 := new(p)
   111  	p2 := new(p)
   112  	gs := make([]g, len(p1.runq))
   113  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   114  	for i := 0; i < len(p1.runq); i++ {
   115  		for j := 0; j < i; j++ {
   116  			gs[j].sig = 0
   117  			runqput(p1, &gs[j], false)
   118  		}
   119  		gp := runqsteal(p2, p1, true)
   120  		s := 0
   121  		if gp != nil {
   122  			s++
   123  			gp.sig++
   124  		}
   125  		for {
   126  			gp, _ = runqget(p2)
   127  			if gp == nil {
   128  				break
   129  			}
   130  			s++
   131  			gp.sig++
   132  		}
   133  		for {
   134  			gp, _ = runqget(p1)
   135  			if gp == nil {
   136  				break
   137  			}
   138  			gp.sig++
   139  		}
   140  		for j := 0; j < i; j++ {
   141  			if gs[j].sig != 1 {
   142  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   143  				throw("bad element")
   144  			}
   145  		}
   146  		if s != i/2 && s != i/2+1 {
   147  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   148  			throw("bad steal")
   149  		}
   150  	}
   151  }
   152  
   153  func RunSchedLocalQueueEmptyTest(iters int) {
   154  	// Test that runq is not spuriously reported as empty.
   155  	// Runq emptiness affects scheduling decisions and spurious emptiness
   156  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   157  	// for an arbitrarily long time).
   158  	done := make(chan bool, 1)
   159  	p := new(p)
   160  	gs := make([]g, 2)
   161  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   162  	ready := new(uint32)
   163  	for i := 0; i < iters; i++ {
   164  		*ready = 0
   165  		next0 := (i & 1) == 0
   166  		next1 := (i & 2) == 0
   167  		runqput(p, &gs[0], next0)
   168  		go func() {
   169  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   170  			}
   171  			if runqempty(p) {
   172  				println("next:", next0, next1)
   173  				throw("queue is empty")
   174  			}
   175  			done <- true
   176  		}()
   177  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   178  		}
   179  		runqput(p, &gs[1], next1)
   180  		runqget(p)
   181  		<-done
   182  		runqget(p)
   183  	}
   184  }
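
        // All three queue tests above are designed to be driven by thin
        // wrappers in package runtime_test, roughly like this sketch:
        //
        //	func TestSchedLocalQueue(t *testing.T) {
        //		runtime.RunSchedLocalQueueTest()
        //	}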
   185  
   186  var (
   187  	StringHash = stringHash
   188  	BytesHash  = bytesHash
   189  	Int32Hash  = int32Hash
   190  	Int64Hash  = int64Hash
   191  	MemHash    = memhash
   192  	MemHash32  = memhash32
   193  	MemHash64  = memhash64
   194  	EfaceHash  = efaceHash
   195  	IfaceHash  = ifaceHash
   196  )
   197  
   198  var UseAeshash = &useAeshash
   199  
   200  func MemclrBytes(b []byte) {
   201  	s := (*slice)(unsafe.Pointer(&b))
   202  	memclrNoHeapPointers(s.array, uintptr(s.len))
   203  }
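
        // MemclrBytes reaches the backing array through the raw slice header.
        // For reference, a portable sketch with the same observable effect
        // (hypothetical helper, not part of this file):
        //
        //	func memclrBytesPortable(b []byte) {
        //		for i := range b {
        //			b[i] = 0
        //		}
        //	}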
   204  
   205  const HashLoad = hashLoad
   206  
   207  // GostringW is an entry point for testing gostringw.
   208  func GostringW(w []uint16) (s string) {
   209  	systemstack(func() {
   210  		s = gostringw(&w[0])
   211  	})
   212  	return
   213  }
   214  
   215  var Open = open
   216  var Close = closefd
   217  var Read = read
   218  var Write = write
   219  
   220  func Envs() []string     { return envs }
   221  func SetEnvs(e []string) { envs = e }
   222  
   223  // For benchmarking.
   224  
   225  func BenchSetType(n int, x any) {
   226  	e := *efaceOf(&x)
   227  	t := e._type
   228  	var size uintptr
   229  	var p unsafe.Pointer
   230  	switch t.kind & kindMask {
   231  	case kindPtr:
   232  		t = (*ptrtype)(unsafe.Pointer(t)).elem
   233  		size = t.size
   234  		p = e.data
   235  	case kindSlice:
   236  		slice := *(*struct {
   237  			ptr      unsafe.Pointer
   238  			len, cap uintptr
   239  		})(e.data)
   240  		t = (*slicetype)(unsafe.Pointer(t)).elem
   241  		size = t.size * slice.len
   242  		p = slice.ptr
   243  	}
   244  	allocSize := roundupsize(size)
   245  	systemstack(func() {
   246  		for i := 0; i < n; i++ {
   247  			heapBitsSetType(uintptr(p), allocSize, size, t)
   248  		}
   249  	})
   250  }
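
        // A benchmark in package runtime_test might drive BenchSetType roughly
        // as follows (a sketch; the benchmark name is an assumption):
        //
        //	func BenchmarkSetTypePtr(b *testing.B) {
        //		runtime.BenchSetType(b.N, new(*byte))
        //	}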
   251  
   252  const PtrSize = goarch.PtrSize
   253  
   254  var ForceGCPeriod = &forcegcperiod
   255  
   256  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   257  // the "environment" traceback level, so later calls to
   258  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   259  func SetTracebackEnv(level string) {
   260  	setTraceback(level)
   261  	traceback_env = traceback_cache
   262  }
   263  
   264  var ReadUnaligned32 = readUnaligned32
   265  var ReadUnaligned64 = readUnaligned64
   266  
   267  func CountPagesInUse() (pagesInUse, counted uintptr) {
   268  	stopTheWorld("CountPagesInUse")
   269  
   270  	pagesInUse = uintptr(mheap_.pagesInUse.Load())
   271  
   272  	for _, s := range mheap_.allspans {
   273  		if s.state.get() == mSpanInUse {
   274  			counted += s.npages
   275  		}
   276  	}
   277  
   278  	startTheWorld()
   279  
   280  	return
   281  }
   282  
   283  func Fastrand() uint32          { return fastrand() }
   284  func Fastrand64() uint64        { return fastrand64() }
   285  func Fastrandn(n uint32) uint32 { return fastrandn(n) }
   286  
   287  type ProfBuf profBuf
   288  
   289  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   290  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   291  }
   292  
   293  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   294  	(*profBuf)(p).write(tag, now, hdr, stk)
   295  }
   296  
   297  const (
   298  	ProfBufBlocking    = profBufBlocking
   299  	ProfBufNonBlocking = profBufNonBlocking
   300  )
   301  
   302  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   303  	return (*profBuf)(p).read(profBufReadMode(mode))
   304  }
   305  
   306  func (p *ProfBuf) Close() {
   307  	(*profBuf)(p).close()
   308  }
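
        // A minimal write/read cycle over a ProfBuf, as a test in package
        // runtime_test might perform it (a sketch; the values are arbitrary):
        //
        //	b := runtime.NewProfBuf(2, 1024, 16) // 2 header words, 1024-word buffer, 16 tag slots
        //	var tag unsafe.Pointer
        //	b.Write(&tag, 1, []uint64{2, 3}, []uintptr{4, 5})
        //	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
        //	_, _, _ = data, tags, eof
        //	b.Close()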
   309  
   310  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   311  	stopTheWorld("ReadMetricsSlow")
   312  
   313  	// Initialize the metrics beforehand because this could
   314  	// allocate and skew the stats.
   315  	metricsLock()
   316  	initMetrics()
   317  	metricsUnlock()
   318  
   319  	systemstack(func() {
   320  		// Read memstats first. It's going to flush
   321  		// the mcaches which readMetrics does not do, so
   322  		// going the other way around may result in
   323  		// inconsistent statistics.
   324  		readmemstats_m(memStats)
   325  	})
   326  
   327  	// Read metrics off the system stack.
   328  	//
   329  	// The only part of readMetrics that could allocate
   330  	// and skew the stats is initMetrics.
   331  	readMetrics(samplesp, len, cap)
   332  
   333  	startTheWorld()
   334  }
   335  
   336  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   337  // MemStats accumulated by scanning the heap.
   338  func ReadMemStatsSlow() (base, slow MemStats) {
   339  	stopTheWorld("ReadMemStatsSlow")
   340  
   341  	// Run on the system stack to avoid stack growth allocation.
   342  	systemstack(func() {
   343  		// Make sure stats don't change.
   344  		getg().m.mallocing++
   345  
   346  		readmemstats_m(&base)
   347  
   348  		// Initialize slow from base and zero the fields we're
   349  		// recomputing.
   350  		slow = base
   351  		slow.Alloc = 0
   352  		slow.TotalAlloc = 0
   353  		slow.Mallocs = 0
   354  		slow.Frees = 0
   355  		slow.HeapReleased = 0
   356  		var bySize [_NumSizeClasses]struct {
   357  			Mallocs, Frees uint64
   358  		}
   359  
   360  		// Add up current allocations in spans.
   361  		for _, s := range mheap_.allspans {
   362  			if s.state.get() != mSpanInUse {
   363  				continue
   364  			}
   365  			if s.isUnusedUserArenaChunk() {
   366  				continue
   367  			}
   368  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   369  				slow.Mallocs++
   370  				slow.Alloc += uint64(s.elemsize)
   371  			} else {
   372  				slow.Mallocs += uint64(s.allocCount)
   373  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   374  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   375  			}
   376  		}
   377  
   378  		// Add in frees by just reading the stats for those directly.
   379  		var m heapStatsDelta
   380  		memstats.heapStats.unsafeRead(&m)
   381  
   382  		// Collect per-sizeclass free stats.
   383  		var smallFree uint64
   384  		for i := 0; i < _NumSizeClasses; i++ {
   385  			slow.Frees += uint64(m.smallFreeCount[i])
   386  			bySize[i].Frees += uint64(m.smallFreeCount[i])
   387  			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
   388  			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
   389  		}
   390  		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
   391  		slow.Mallocs += slow.Frees
   392  
   393  		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
   394  
   395  		for i := range slow.BySize {
   396  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   397  			slow.BySize[i].Frees = bySize[i].Frees
   398  		}
   399  
   400  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   401  			chunk := mheap_.pages.tryChunkOf(i)
   402  			if chunk == nil {
   403  				continue
   404  			}
   405  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   406  			slow.HeapReleased += uint64(pg) * pageSize
   407  		}
   408  		for _, p := range allp {
   409  			pg := sys.OnesCount64(p.pcache.scav)
   410  			slow.HeapReleased += uint64(pg) * pageSize
   411  		}
   412  
   413  		getg().m.mallocing--
   414  	})
   415  
   416  	startTheWorld()
   417  	return
   418  }
   419  
   420  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   421  // stderr, and blocks in a stack containing
   422  // "runtime.blockOnSystemStackInternal".
   423  func BlockOnSystemStack() {
   424  	systemstack(blockOnSystemStackInternal)
   425  }
   426  
   427  func blockOnSystemStackInternal() {
   428  	print("x\n")
   429  	lock(&deadlock)
   430  	lock(&deadlock)
   431  }
   432  
   433  type RWMutex struct {
   434  	rw rwmutex
   435  }
   436  
   437  func (rw *RWMutex) RLock() {
   438  	rw.rw.rlock()
   439  }
   440  
   441  func (rw *RWMutex) RUnlock() {
   442  	rw.rw.runlock()
   443  }
   444  
   445  func (rw *RWMutex) Lock() {
   446  	rw.rw.lock()
   447  }
   448  
   449  func (rw *RWMutex) Unlock() {
   450  	rw.rw.unlock()
   451  }
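
        // The wrapper lets tests exercise the runtime's internal rwmutex
        // directly (a sketch):
        //
        //	var rw runtime.RWMutex
        //	rw.RLock()
        //	rw.RUnlock()
        //	rw.Lock()
        //	rw.Unlock()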
   452  
   453  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   454  
   455  func MapBucketsCount(m map[int]int) int {
   456  	h := *(**hmap)(unsafe.Pointer(&m))
   457  	return 1 << h.B
   458  }
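
        // Since h.B is the base-2 logarithm of the bucket count, the result is
        // always a power of two (a sketch; the exact count depends on the
        // map's load factor):
        //
        //	m := make(map[int]int, 1000)
        //	n := runtime.MapBucketsCount(m) // a power of two sized for the hint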
   459  
   460  func MapBucketsPointerIsNil(m map[int]int) bool {
   461  	h := *(**hmap)(unsafe.Pointer(&m))
   462  	return h.buckets == nil
   463  }
   464  
   465  func LockOSCounts() (external, internal uint32) {
   466  	gp := getg()
   467  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   468  		if gp.lockedm != 0 {
   469  			panic("lockedm on non-locked goroutine")
   470  		}
   471  	} else {
   472  		if gp.lockedm == 0 {
   473  			panic("nil lockedm on locked goroutine")
   474  		}
   475  	}
   476  	return gp.m.lockedExt, gp.m.lockedInt
   477  }
   478  
   479  //go:noinline
   480  func TracebackSystemstack(stk []uintptr, i int) int {
   481  	if i == 0 {
   482  		pc, sp := getcallerpc(), getcallersp()
   483  		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
   484  	}
   485  	n := 0
   486  	systemstack(func() {
   487  		n = TracebackSystemstack(stk, i-1)
   488  	})
   489  	return n
   490  }
   491  
   492  func KeepNArenaHints(n int) {
   493  	hint := mheap_.arenaHints
   494  	for i := 1; i < n; i++ {
   495  		hint = hint.next
   496  		if hint == nil {
   497  			return
   498  		}
   499  	}
   500  	hint.next = nil
   501  }
   502  
   503  // MapNextArenaHint reserves a page at the next arena growth hint,
   504  // preventing the arena from growing there, and returns the range of
   505  // addresses that are no longer viable.
   506  func MapNextArenaHint() (start, end uintptr) {
   507  	hint := mheap_.arenaHints
   508  	addr := hint.addr
   509  	if hint.down {
   510  		start, end = addr-heapArenaBytes, addr
   511  		addr -= physPageSize
   512  	} else {
   513  		start, end = addr, addr+heapArenaBytes
   514  	}
   515  	sysReserve(unsafe.Pointer(addr), physPageSize)
   516  	return
   517  }
   518  
   519  func GetNextArenaHint() uintptr {
   520  	return mheap_.arenaHints.addr
   521  }
   522  
   523  type G = g
   524  
   525  type Sudog = sudog
   526  
   527  func Getg() *G {
   528  	return getg()
   529  }
   530  
   531  func GIsWaitingOnMutex(gp *G) bool {
   532  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   533  }
   534  
   535  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   536  
   537  //go:noinline
   538  func PanicForTesting(b []byte, i int) byte {
   539  	return unexportedPanicForTesting(b, i)
   540  }
   541  
   542  //go:noinline
   543  func unexportedPanicForTesting(b []byte, i int) byte {
   544  	return b[i]
   545  }
   546  
   547  func G0StackOverflow() {
   548  	systemstack(func() {
   549  		stackOverflow(nil)
   550  	})
   551  }
   552  
   553  func stackOverflow(x *byte) {
   554  	var buf [256]byte
   555  	stackOverflow(&buf[0])
   556  }
   557  
   558  func MapTombstoneCheck(m map[int]int) {
   559  	// Make sure emptyOne and emptyRest are distributed correctly.
   560  	// We should have a series of filled and emptyOne cells, followed by
   561  	// a series of emptyRest cells.
   562  	h := *(**hmap)(unsafe.Pointer(&m))
   563  	i := any(m)
   564  	t := *(**maptype)(unsafe.Pointer(&i))
   565  
   566  	for x := 0; x < 1<<h.B; x++ {
   567  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
   568  		n := 0
   569  		for b := b0; b != nil; b = b.overflow(t) {
   570  			for i := 0; i < bucketCnt; i++ {
   571  				if b.tophash[i] != emptyRest {
   572  					n++
   573  				}
   574  			}
   575  		}
   576  		k := 0
   577  		for b := b0; b != nil; b = b.overflow(t) {
   578  			for i := 0; i < bucketCnt; i++ {
   579  				if k < n && b.tophash[i] == emptyRest {
   580  					panic("early emptyRest")
   581  				}
   582  				if k >= n && b.tophash[i] != emptyRest {
   583  					panic("late non-emptyRest")
   584  				}
   585  				if k == n-1 && b.tophash[i] == emptyOne {
   586  					panic("last non-emptyRest entry is emptyOne")
   587  				}
   588  				k++
   589  			}
   590  		}
   591  	}
   592  }
   593  
   594  func RunGetgThreadSwitchTest() {
   595  	// Test that getg works correctly with thread switch.
   596  	// With gccgo, if we generate getg inlined, the backend
   597  	// may cache the address of the TLS variable, which
   598  	// will become invalid after a thread switch. This test
   599  	// checks that the bad caching doesn't happen.
   600  
   601  	ch := make(chan int)
   602  	go func(ch chan int) {
   603  		ch <- 5
   604  		LockOSThread()
   605  	}(ch)
   606  
   607  	g1 := getg()
   608  
   609  	// Block on a receive. This is likely to get us a thread
   610  	// switch. If we yield to the sender goroutine, it will
   611  	// lock the thread, forcing us to resume on a different
   612  	// thread.
   613  	<-ch
   614  
   615  	g2 := getg()
   616  	if g1 != g2 {
   617  		panic("g1 != g2")
   618  	}
   619  
   620  	// Also test getg after some control flow, as the
   621  	// backend is sensitive to control flow.
   622  	g3 := getg()
   623  	if g1 != g3 {
   624  		panic("g1 != g3")
   625  	}
   626  }
   627  
   628  const (
   629  	PageSize         = pageSize
   630  	PallocChunkPages = pallocChunkPages
   631  	PageAlloc64Bit   = pageAlloc64Bit
   632  	PallocSumBytes   = pallocSumBytes
   633  )
   634  
   635  // Expose pallocSum for testing.
   636  type PallocSum pallocSum
   637  
   638  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   639  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   640  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   641  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
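
        // pallocSum packs start, max, and end into a single word, so packing
        // and unpacking must round-trip (a sketch):
        //
        //	s := runtime.PackPallocSum(1, 2, 3)
        //	_, _, _ = s.Start(), s.Max(), s.End() // 1, 2, 3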
   642  
   643  // Expose pallocBits for testing.
   644  type PallocBits pallocBits
   645  
   646  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   647  	return (*pallocBits)(b).find(npages, searchIdx)
   648  }
   649  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   650  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   651  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   652  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   653  
   654  // SummarizeSlow is a slow but more obviously correct implementation
   655  // of (*pallocBits).summarize. Used for testing.
   656  func SummarizeSlow(b *PallocBits) PallocSum {
   657  	var start, max, end uint
   658  
   659  	const N = uint(len(b)) * 64
   660  	for start < N && (*pageBits)(b).get(start) == 0 {
   661  		start++
   662  	}
   663  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   664  		end++
   665  	}
   666  	run := uint(0)
   667  	for i := uint(0); i < N; i++ {
   668  		if (*pageBits)(b).get(i) == 0 {
   669  			run++
   670  		} else {
   671  			run = 0
   672  		}
   673  		if run > max {
   674  			max = run
   675  		}
   676  	}
   677  	return PackPallocSum(start, max, end)
   678  }
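
        // A property-style check comparing the fast and slow summaries, as a
        // test in package runtime_test might phrase it (a sketch):
        //
        //	var b runtime.PallocBits
        //	b.AllocRange(0, 8) // mark the first 8 pages allocated
        //	if b.Summarize() != runtime.SummarizeSlow(&b) {
        //		t.Fatal("summarize mismatch")
        //	}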
   679  
   680  // Expose non-trivial helpers for testing.
   681  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   682  
   683  // Given two PallocBits, returns a set of bit ranges where
   684  // they differ.
   685  func DiffPallocBits(a, b *PallocBits) []BitRange {
   686  	ba := (*pageBits)(a)
   687  	bb := (*pageBits)(b)
   688  
   689  	var d []BitRange
   690  	base, size := uint(0), uint(0)
   691  	for i := uint(0); i < uint(len(ba))*64; i++ {
   692  		if ba.get(i) != bb.get(i) {
   693  			if size == 0 {
   694  				base = i
   695  			}
   696  			size++
   697  		} else {
   698  			if size != 0 {
   699  				d = append(d, BitRange{base, size})
   700  			}
   701  			size = 0
   702  		}
   703  	}
   704  	if size != 0 {
   705  		d = append(d, BitRange{base, size})
   706  	}
   707  	return d
   708  }
   709  
   710  // StringifyPallocBits gets the bits in the bit range r from b,
   711  // and returns a string containing the bits as ASCII 0 and 1
   712  // characters.
   713  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   714  	str := ""
   715  	for j := r.I; j < r.I+r.N; j++ {
   716  		if (*pageBits)(b).get(j) != 0 {
   717  			str += "1"
   718  		} else {
   719  			str += "0"
   720  		}
   721  	}
   722  	return str
   723  }
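
        // Given two *PallocBits got and want, DiffPallocBits and
        // StringifyPallocBits combine naturally when reporting mismatches
        // (a sketch):
        //
        //	for _, r := range runtime.DiffPallocBits(got, want) {
        //		t.Errorf("bits [%d, %d) differ: got %s", r.I, r.I+r.N,
        //			runtime.StringifyPallocBits(got, r))
        //	}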
   724  
   725  // Expose pallocData for testing.
   726  type PallocData pallocData
   727  
   728  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   729  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   730  }
   731  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   732  func (d *PallocData) ScavengedSetRange(i, n uint) {
   733  	(*pallocData)(d).scavenged.setRange(i, n)
   734  }
   735  func (d *PallocData) PallocBits() *PallocBits {
   736  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   737  }
   738  func (d *PallocData) Scavenged() *PallocBits {
   739  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   740  }
   741  
   742  // Expose fillAligned for testing.
   743  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   744  
   745  // Expose pageCache for testing.
   746  type PageCache pageCache
   747  
   748  const PageCachePages = pageCachePages
   749  
   750  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   751  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   752  }
   753  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   754  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   755  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   756  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   757  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   758  	return (*pageCache)(c).alloc(npages)
   759  }
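
        // A page cache covers 64 pages starting at base, with a set bit in
        // cache meaning the corresponding page is free (a sketch):
        //
        //	base := uintptr(0x1000) // some page-aligned address
        //	c := runtime.NewPageCache(base, ^uint64(0), 0) // all 64 pages free, none scavenged
        //	addr, scav := c.Alloc(1)                       // addr == base, scav == 0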
   760  func (c *PageCache) Flush(s *PageAlloc) {
   761  	cp := (*pageCache)(c)
   762  	sp := (*pageAlloc)(s)
   763  
   764  	systemstack(func() {
   765  		// None of the tests need any higher-level locking, so we just
   766  		// take the lock internally.
   767  		lock(sp.mheapLock)
   768  		cp.flush(sp)
   769  		unlock(sp.mheapLock)
   770  	})
   771  }
   772  
   773  // Expose chunk index type.
   774  type ChunkIdx chunkIdx
   775  
   776  // Expose pageAlloc for testing. Note that because pageAlloc is
   777  // not in the heap, neither is PageAlloc.
   778  type PageAlloc pageAlloc
   779  
   780  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   781  	pp := (*pageAlloc)(p)
   782  
   783  	var addr, scav uintptr
   784  	systemstack(func() {
   785  		// None of the tests need any higher-level locking, so we just
   786  		// take the lock internally.
   787  		lock(pp.mheapLock)
   788  		addr, scav = pp.alloc(npages)
   789  		unlock(pp.mheapLock)
   790  	})
   791  	return addr, scav
   792  }
   793  func (p *PageAlloc) AllocToCache() PageCache {
   794  	pp := (*pageAlloc)(p)
   795  
   796  	var c PageCache
   797  	systemstack(func() {
   798  		// None of the tests need any higher-level locking, so we just
   799  		// take the lock internally.
   800  		lock(pp.mheapLock)
   801  		c = PageCache(pp.allocToCache())
   802  		unlock(pp.mheapLock)
   803  	})
   804  	return c
   805  }
   806  func (p *PageAlloc) Free(base, npages uintptr) {
   807  	pp := (*pageAlloc)(p)
   808  
   809  	systemstack(func() {
   810  		// None of the tests need any higher-level locking, so we just
   811  		// take the lock internally.
   812  		lock(pp.mheapLock)
   813  		pp.free(base, npages, true)
   814  		unlock(pp.mheapLock)
   815  	})
   816  }
   817  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   818  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   819  }
   820  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   821  	pp := (*pageAlloc)(p)
   822  	systemstack(func() {
   823  		r = pp.scavenge(nbytes, nil)
   824  	})
   825  	return
   826  }
   827  func (p *PageAlloc) InUse() []AddrRange {
   828  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   829  	for _, r := range p.inUse.ranges {
   830  		ranges = append(ranges, AddrRange{r})
   831  	}
   832  	return ranges
   833  }
   834  
   835  // Returns nil if the PallocData's L2 is missing.
   836  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   837  	ci := chunkIdx(i)
   838  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   839  }
   840  
   841  // AddrRange is a wrapper around addrRange for testing.
   842  type AddrRange struct {
   843  	addrRange
   844  }
   845  
   846  // MakeAddrRange creates a new address range.
   847  func MakeAddrRange(base, limit uintptr) AddrRange {
   848  	return AddrRange{makeAddrRange(base, limit)}
   849  }
   850  
   851  // Base returns the virtual base address of the address range.
   852  func (a AddrRange) Base() uintptr {
   853  	return a.addrRange.base.addr()
   854  }
   855  
   856  // Limit returns the virtual address of the limit of the address range.
   857  func (a AddrRange) Limit() uintptr {
   858  	return a.addrRange.limit.addr()
   859  }
   860  
   861  // Equals returns true if the two address ranges are exactly equal.
   862  func (a AddrRange) Equals(b AddrRange) bool {
   863  	return a == b
   864  }
   865  
   866  // Size returns the size in bytes of the address range.
   867  func (a AddrRange) Size() uintptr {
   868  	return a.addrRange.size()
   869  }
   870  
   871  // testSysStat is the sysStat passed to test versions of various
   872  // runtime structures. We have to keep track of this explicitly
   873  // because otherwise memstats.mappedReady won't line up
   874  // with other stats in the runtime during tests.
   875  var testSysStat = &memstats.other_sys
   876  
   877  // AddrRanges is a wrapper around addrRanges for testing.
   878  type AddrRanges struct {
   879  	addrRanges
   880  	mutable bool
   881  }
   882  
   883  // NewAddrRanges creates a new empty addrRanges.
   884  //
   885  // Note that this initializes addrRanges just like in the
   886  // runtime, so its memory is persistentalloc'd. Call this
   887  // function sparingly since the memory it allocates is
   888  // leaked.
   889  //
   890  // This AddrRanges is mutable, so we can test methods like
   891  // Add.
   892  func NewAddrRanges() AddrRanges {
   893  	r := addrRanges{}
   894  	r.init(testSysStat)
   895  	return AddrRanges{r, true}
   896  }
   897  
   898  // MakeAddrRanges creates a new addrRanges populated with
   899  // the ranges in a.
   900  //
   901  // The returned AddrRanges is immutable, so methods like
   902  // Add will fail.
   903  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   904  	// Methods that manipulate the backing store of addrRanges.ranges should
   905  	// not be used on the result from this function (e.g. add) since they may
   906  	// trigger reallocation. That would normally be fine, except the new
   907  	// backing store won't come from the heap, but from persistentalloc, so
   908  	// we'll leak some memory implicitly.
   909  	ranges := make([]addrRange, 0, len(a))
   910  	total := uintptr(0)
   911  	for _, r := range a {
   912  		ranges = append(ranges, r.addrRange)
   913  		total += r.Size()
   914  	}
   915  	return AddrRanges{addrRanges{
   916  		ranges:     ranges,
   917  		totalBytes: total,
   918  		sysStat:    testSysStat,
   919  	}, false}
   920  }
   921  
   922  // Ranges returns a copy of the ranges described by the
   923  // addrRanges.
   924  func (a *AddrRanges) Ranges() []AddrRange {
   925  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   926  	for _, r := range a.addrRanges.ranges {
   927  		result = append(result, AddrRange{r})
   928  	}
   929  	return result
   930  }
   931  
   932  // FindSucc returns the successor to base. See addrRanges.findSucc
   933  // for more details.
   934  func (a *AddrRanges) FindSucc(base uintptr) int {
   935  	return a.findSucc(base)
   936  }
   937  
   938  // Add adds a new AddrRange to the AddrRanges.
   939  //
   940  // The AddrRanges must be mutable (i.e. created by NewAddrRanges),
   941  // otherwise this method will throw.
   942  func (a *AddrRanges) Add(r AddrRange) {
   943  	if !a.mutable {
   944  		throw("attempt to mutate immutable AddrRanges")
   945  	}
   946  	a.add(r.addrRange)
   947  }
   948  
   949  // TotalBytes returns the totalBytes field of the addrRanges.
   950  func (a *AddrRanges) TotalBytes() uintptr {
   951  	return a.addrRanges.totalBytes
   952  }
   953  
   954  // BitRange represents a range over a bitmap.
   955  type BitRange struct {
   956  	I, N uint // bit index and length in bits
   957  }
   958  
   959  // NewPageAlloc creates a new page allocator for testing and
   960  // initializes it with the scav and chunks maps. Each key in these maps
   961  // represents a chunk index and each value is a series of bit ranges to
   962  // set within each bitmap's chunk.
   963  //
   964  // The initialization of the pageAlloc preserves the invariant that if a
   965  // scavenged bit is set the alloc bit is necessarily unset, so some
   966  // of the bits described by scav may be cleared in the final bitmap if
   967  // ranges in chunks overlap with them.
   968  //
   969  // scav is optional, and if nil, the scavenged bitmap will be cleared
   970  // (as opposed to all 1s, which it usually is). Furthermore, every
   971  // chunk index in scav must appear in chunks; ones that do not are
   972  // ignored.
   973  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
   974  	p := new(pageAlloc)
   975  
   976  	// We've got an entry, so initialize the pageAlloc.
   977  	p.init(new(mutex), testSysStat)
   978  	lockInit(p.mheapLock, lockRankMheap)
   979  	p.test = true
   980  	for i, init := range chunks {
   981  		addr := chunkBase(chunkIdx(i))
   982  
   983  		// Mark the chunk's existence in the pageAlloc.
   984  		systemstack(func() {
   985  			lock(p.mheapLock)
   986  			p.grow(addr, pallocChunkBytes)
   987  			unlock(p.mheapLock)
   988  		})
   989  
   990  		// Initialize the bitmap and update pageAlloc metadata.
   991  		chunk := p.chunkOf(chunkIndex(addr))
   992  
   993  		// Clear all the scavenged bits, which the grow call above set.
   994  		chunk.scavenged.clearRange(0, pallocChunkPages)
   995  
   996  		// Apply scavenge state if applicable.
   997  		if scav != nil {
   998  			if scvg, ok := scav[i]; ok {
   999  				for _, s := range scvg {
  1000  					// Ignore the case of s.N == 0. setRange doesn't handle
  1001  					// it and it's a no-op anyway.
  1002  					if s.N != 0 {
  1003  						chunk.scavenged.setRange(s.I, s.N)
  1004  					}
  1005  				}
  1006  			}
  1007  		}
  1008  
  1009  		// Apply alloc state.
  1010  		for _, s := range init {
  1011  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1012  			// it and it's a no-op anyway.
  1013  			if s.N != 0 {
  1014  				chunk.allocRange(s.I, s.N)
  1015  			}
  1016  		}
  1017  
  1018  		// Make sure the scavenge index is updated.
  1019  		//
  1020  		// This is an inefficient way to do it, but it's also the simplest way.
  1021  		minPages := physPageSize / pageSize
  1022  		if minPages < 1 {
  1023  			minPages = 1
  1024  		}
  1025  		_, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, minPages)
  1026  		if npages != 0 {
  1027  			p.scav.index.mark(addr, addr+pallocChunkBytes)
  1028  		}
  1029  
  1030  		// Update heap metadata for the allocRange calls above.
  1031  		systemstack(func() {
  1032  			lock(p.mheapLock)
  1033  			p.update(addr, pallocChunkPages, false, false)
  1034  			unlock(p.mheapLock)
  1035  		})
  1036  	}
  1037  
  1038  	return (*PageAlloc)(p)
  1039  }
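
        // A typical test constructs an allocator from bitmaps, exercises it,
        // and frees it explicitly (a sketch):
        //
        //	p := runtime.NewPageAlloc(map[runtime.ChunkIdx][]runtime.BitRange{
        //		runtime.BaseChunkIdx: {{0, 1}}, // first page already allocated
        //	}, nil)
        //	defer runtime.FreePageAlloc(p)
        //	addr, _ := p.Alloc(1) // returns the next free page's address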
  1040  
  1041  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1042  // is called the pageAlloc may no longer be used. The object itself will be
  1043  // collected by the garbage collector once it is no longer live.
  1044  func FreePageAlloc(pp *PageAlloc) {
  1045  	p := (*pageAlloc)(pp)
  1046  
  1047  	// Free all the mapped space for the summary levels.
  1048  	if pageAlloc64Bit != 0 {
  1049  		for l := 0; l < summaryLevels; l++ {
  1050  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1051  		}
  1052  		// Only necessary on 64-bit. This is a global on 32-bit.
  1053  		sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks)))
  1054  	} else {
  1055  		resSize := uintptr(0)
  1056  		for _, s := range p.summary {
  1057  			resSize += uintptr(cap(s)) * pallocSumBytes
  1058  		}
  1059  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1060  	}
  1061  
  1062  	// Subtract back out whatever we mapped for the summaries.
  1063  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1064  	// (and in anger should actually be accounted for), and there's no other
  1065  	// way to figure out how much we actually mapped.
  1066  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1067  	testSysStat.add(-int64(p.summaryMappedReady))
  1068  
  1069  	// Free the mapped space for chunks.
  1070  	for i := range p.chunks {
  1071  		if x := p.chunks[i]; x != nil {
  1072  			p.chunks[i] = nil
  1073  			// This memory comes from sysAlloc and will always be page-aligned.
  1074  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1075  		}
  1076  	}
  1077  }
  1078  
  1079  // BaseChunkIdx is a convenient chunkIdx value which works on both
  1080  // 64-bit and 32-bit platforms, allowing the tests to share code
  1081  // between the two.
  1082  //
  1083  // This should not be higher than 0x100*pallocChunkBytes to support
  1084  // mips and mipsle, which only have 31-bit address spaces.
  1085  var BaseChunkIdx = func() ChunkIdx {
  1086  	var prefix uintptr
  1087  	if pageAlloc64Bit != 0 {
  1088  		prefix = 0xc000
  1089  	} else {
  1090  		prefix = 0x100
  1091  	}
  1092  	baseAddr := prefix * pallocChunkBytes
  1093  	if goos.IsAix != 0 {
  1094  		baseAddr += arenaBaseOffset
  1095  	}
  1096  	return ChunkIdx(chunkIndex(baseAddr))
  1097  }()
  1098  
  1099  // PageBase returns an address given a chunk index and a page index
  1100  // relative to that chunk.
  1101  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1102  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1103  }
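
        // For example, the address of the third page of the base chunk
        // (a sketch):
        //
        //	addr := runtime.PageBase(runtime.BaseChunkIdx, 2)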
  1104  
  1105  type BitsMismatch struct {
  1106  	Base      uintptr
  1107  	Got, Want uint64
  1108  }
  1109  
  1110  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1111  	ok = true
  1112  
  1113  	// Run on the system stack to avoid stack growth allocation.
  1114  	systemstack(func() {
  1115  		getg().m.mallocing++
  1116  
  1117  		// Lock so that we can safely access the bitmap.
  1118  		lock(&mheap_.lock)
  1119  	chunkLoop:
  1120  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1121  			chunk := mheap_.pages.tryChunkOf(i)
  1122  			if chunk == nil {
  1123  				continue
  1124  			}
  1125  			for j := 0; j < pallocChunkPages/64; j++ {
  1126  				// Run over each 64-bit bitmap section and ensure
  1127  				// scavenged is being cleared properly on allocation.
  1128  				// If a used bit and scavenged bit are both set, that's
  1129  				// an error, and could indicate a larger problem, or
  1130  				// an accounting problem.
  1131  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1132  				got := chunk.scavenged[j]
  1133  				if want != got {
  1134  					ok = false
  1135  					if n >= len(mismatches) {
  1136  						break chunkLoop
  1137  					}
  1138  					mismatches[n] = BitsMismatch{
  1139  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
  1140  						Got:  got,
  1141  						Want: want,
  1142  					}
  1143  					n++
  1144  				}
  1145  			}
  1146  		}
  1147  		unlock(&mheap_.lock)
  1148  
  1149  		getg().m.mallocing--
  1150  	})
  1151  	return
  1152  }
  1153  
  1154  func PageCachePagesLeaked() (leaked uintptr) {
  1155  	stopTheWorld("PageCachePagesLeaked")
  1156  
  1157  	// Walk over destroyed Ps and look for unflushed caches.
  1158  	deadp := allp[len(allp):cap(allp)]
  1159  	for _, p := range deadp {
  1160  		// Since we're going past len(allp) we may see nil Ps.
  1161  		// Just ignore them.
  1162  		if p != nil {
  1163  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1164  		}
  1165  	}
  1166  
  1167  	startTheWorld()
  1168  	return
  1169  }
  1170  
  1171  var Semacquire = semacquire
  1172  var Semrelease1 = semrelease1
  1173  
  1174  func SemNwait(addr *uint32) uint32 {
  1175  	root := semtable.rootFor(addr)
  1176  	return root.nwait.Load()
  1177  }
  1178  
  1179  const SemTableSize = semTabSize
  1180  
  1181  // SemTable is a wrapper around semTable exported for testing.
  1182  type SemTable struct {
  1183  	semTable
  1184  }
  1185  
  1186  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1187  func (t *SemTable) Enqueue(addr *uint32) {
  1188  	s := acquireSudog()
  1189  	s.releasetime = 0
  1190  	s.acquiretime = 0
  1191  	s.ticket = 0
  1192  	t.semTable.rootFor(addr).queue(addr, s, false)
  1193  }
  1194  
  1195  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1196  //
  1197  // Returns true if there actually was a waiter to be dequeued.
  1198  func (t *SemTable) Dequeue(addr *uint32) bool {
  1199  	s, _ := t.semTable.rootFor(addr).dequeue(addr)
  1200  	if s != nil {
  1201  		releaseSudog(s)
  1202  		return true
  1203  	}
  1204  	return false
  1205  }
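
        // A balanced enqueue/dequeue sequence (a sketch):
        //
        //	var tab runtime.SemTable
        //	var sema uint32
        //	tab.Enqueue(&sema)
        //	if !tab.Dequeue(&sema) {
        //		t.Fatal("expected a waiter")
        //	}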
  1206  
  1207  // mspan wrapper for testing.
  1208  type MSpan mspan
  1209  
  1210  // Allocate an mspan for testing.
  1211  func AllocMSpan() *MSpan {
  1212  	var s *mspan
  1213  	systemstack(func() {
  1214  		lock(&mheap_.lock)
  1215  		s = (*mspan)(mheap_.spanalloc.alloc())
  1216  		unlock(&mheap_.lock)
  1217  	})
  1218  	return (*MSpan)(s)
  1219  }
  1220  
  1221  // Free an allocated mspan.
  1222  func FreeMSpan(s *MSpan) {
  1223  	systemstack(func() {
  1224  		lock(&mheap_.lock)
  1225  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1226  		unlock(&mheap_.lock)
  1227  	})
  1228  }
  1229  
  1230  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1231  	s := (*mspan)(ms)
  1232  	s.nelems = uintptr(len(bits) * 8)
  1233  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1234  	result := s.countAlloc()
  1235  	s.gcmarkBits = nil
  1236  	return result
  1237  }
  1238  
  1239  const (
  1240  	TimeHistSubBucketBits = timeHistSubBucketBits
  1241  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1242  	TimeHistNumBuckets    = timeHistNumBuckets
  1243  	TimeHistMinBucketBits = timeHistMinBucketBits
  1244  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1245  )
  1246  
  1247  type TimeHistogram timeHistogram
  1248  
  1249  // Count returns the count for the given bucket and subBucket indices.
  1250  // It returns true if the bucket was valid; otherwise it returns the
  1251  // count for the underflow bucket if bucket < 0, or the count for the
  1252  // overflow bucket if the index was out of range, and false.
  1253  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1254  	t := (*timeHistogram)(th)
  1255  	if bucket < 0 {
  1256  		return t.underflow.Load(), false
  1257  	}
  1258  	i := bucket*TimeHistNumSubBuckets + subBucket
  1259  	if i >= len(t.counts) {
  1260  		return t.overflow.Load(), false
  1261  	}
  1262  	return t.counts[i].Load(), true
  1263  }
  1264  
  1265  func (th *TimeHistogram) Record(duration int64) {
  1266  	(*timeHistogram)(th).record(duration)
  1267  }
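
        // Recording and reading back a sample (a sketch; which bucket a
        // duration lands in depends on the TimeHist* constants above):
        //
        //	var h runtime.TimeHistogram
        //	h.Record(int64(time.Millisecond))
        //	count, ok := h.Count(0, 0) // count in the lowest bucket, if valid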
  1268  
  1269  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1270  
  1271  func SetIntArgRegs(a int) int {
  1272  	lock(&finlock)
  1273  	old := intArgRegs
  1274  	if a >= 0 {
  1275  		intArgRegs = a
  1276  	}
  1277  	unlock(&finlock)
  1278  	return old
  1279  }
  1280  
  1281  func FinalizerGAsleep() bool {
  1282  	return fingStatus.Load()&fingWait != 0
  1283  }
  1284  
  1285  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1286  // extra layer of call, since then there's a return before the "real"
  1287  // next call.
  1288  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1289  
  1290  // For GCTestIsReachable, it's important that we do this as a call so
  1291  // escape analysis can see through it.
  1292  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1293  	return gcTestIsReachable(ptrs...)
  1294  }
  1295  
  1296  // For GCTestPointerClass, it's important that we do this as a call so
  1297  // escape analysis can see through it.
  1298  //
  1299  // This is nosplit because gcTestPointerClass is.
  1300  //
  1301  //go:nosplit
  1302  func GCTestPointerClass(p unsafe.Pointer) string {
  1303  	return gcTestPointerClass(p)
  1304  }
  1305  
  1306  const Raceenabled = raceenabled
  1307  
  1308  const (
  1309  	GCBackgroundUtilization     = gcBackgroundUtilization
  1310  	GCGoalUtilization           = gcGoalUtilization
  1311  	DefaultHeapMinimum          = defaultHeapMinimum
  1312  	MemoryLimitHeapGoalHeadroom = memoryLimitHeapGoalHeadroom
  1313  )
  1314  
  1315  type GCController struct {
  1316  	gcControllerState
  1317  }
  1318  
  1319  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1320  	// Force the controller to escape. We're going to
  1321  	// do 64-bit atomics on it, and if it gets stack-allocated
  1322  	// on a 32-bit architecture, it may end up at an address
  1323  	// that is not 8-byte aligned.
  1324  	g := Escape(new(GCController))
  1325  	g.gcControllerState.test = true // Mark it as a test copy.
  1326  	g.init(int32(gcPercent), memoryLimit)
  1327  	return g
  1328  }
  1329  
  1330  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1331  	trigger, _ := c.trigger()
  1332  	if c.heapMarked > trigger {
  1333  		trigger = c.heapMarked
  1334  	}
  1335  	c.maxStackScan.Store(stackSize)
  1336  	c.globalsScan.Store(globalsSize)
  1337  	c.heapLive.Store(trigger)
  1338  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1339  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1340  }
  1341  
  1342  func (c *GCController) AssistWorkPerByte() float64 {
  1343  	return c.assistWorkPerByte.Load()
  1344  }
  1345  
  1346  func (c *GCController) HeapGoal() uint64 {
  1347  	return c.heapGoal()
  1348  }
  1349  
  1350  func (c *GCController) HeapLive() uint64 {
  1351  	return c.heapLive.Load()
  1352  }
  1353  
  1354  func (c *GCController) HeapMarked() uint64 {
  1355  	return c.heapMarked
  1356  }
  1357  
  1358  func (c *GCController) Triggered() uint64 {
  1359  	return c.triggered
  1360  }
  1361  
  1362  type GCControllerReviseDelta struct {
  1363  	HeapLive        int64
  1364  	HeapScan        int64
  1365  	HeapScanWork    int64
  1366  	StackScanWork   int64
  1367  	GlobalsScanWork int64
  1368  }
  1369  
  1370  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1371  	c.heapLive.Add(d.HeapLive)
  1372  	c.heapScan.Add(d.HeapScan)
  1373  	c.heapScanWork.Add(d.HeapScanWork)
  1374  	c.stackScanWork.Add(d.StackScanWork)
  1375  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1376  	c.revise()
  1377  }
  1378  
  1379  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1380  	c.assistTime.Store(assistTime)
  1381  	c.endCycle(elapsed, gomaxprocs, false)
  1382  	c.resetLive(bytesMarked)
  1383  	c.commit(false)
  1384  }
  1385  
  1386  func (c *GCController) AddIdleMarkWorker() bool {
  1387  	return c.addIdleMarkWorker()
  1388  }
  1389  
  1390  func (c *GCController) NeedIdleMarkWorker() bool {
  1391  	return c.needIdleMarkWorker()
  1392  }
  1393  
  1394  func (c *GCController) RemoveIdleMarkWorker() {
  1395  	c.removeIdleMarkWorker()
  1396  }
  1397  
  1398  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1399  	c.setMaxIdleMarkWorkers(max)
  1400  }
  1401  
  1402  var alwaysFalse bool
  1403  var escapeSink any
  1404  
  1405  func Escape[T any](x T) T {
  1406  	if alwaysFalse {
  1407  		escapeSink = x
  1408  	}
  1409  	return x
  1410  }
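
        // The compiler cannot prove that alwaysFalse stays false, so the
        // argument is treated as escaping and is heap-allocated; this is what
        // keeps the gs slices above from moving while tests hold guintptrs
        // into them (a sketch):
        //
        //	gs := make([]runtime.G, 4)
        //	runtime.Escape(gs) // gs is forced to the heap and will not move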
  1411  
  1412  // Acquirem blocks preemption.
  1413  func Acquirem() {
  1414  	acquirem()
  1415  }
  1416  
  1417  func Releasem() {
  1418  	releasem(getg().m)
  1419  }
  1420  
  1421  var Timediv = timediv
  1422  
  1423  type PIController struct {
  1424  	piController
  1425  }
  1426  
  1427  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1428  	return &PIController{piController{
  1429  		kp:  kp,
  1430  		ti:  ti,
  1431  		tt:  tt,
  1432  		min: min,
  1433  		max: max,
  1434  	}}
  1435  }
  1436  
  1437  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1438  	return c.piController.next(input, setpoint, period)
  1439  }
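
        // The controller computes a proportional-integral response to the
        // error (setpoint - input), clamped to [min, max]. A single step
        // (a sketch; the constants are arbitrary):
        //
        //	c := runtime.NewPIController(0.9, 4.0, 1000.0, 0.0, 1000.0)
        //	out, ok := c.Next(0.5, 1.0, 1.0) // ok is false if the state overflowed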
  1440  
  1441  const (
  1442  	CapacityPerProc          = capacityPerProc
  1443  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1444  )
  1445  
  1446  type GCCPULimiter struct {
  1447  	limiter gcCPULimiterState
  1448  }
  1449  
  1450  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1451  	// Force the controller to escape. We're going to
  1452  	// do 64-bit atomics on it, and if it gets stack-allocated
  1453  	// on a 32-bit architecture, it may end up at an address
  1454  	// that is not 8-byte aligned.
  1455  	l := Escape(new(GCCPULimiter))
  1456  	l.limiter.test = true
  1457  	l.limiter.resetCapacity(now, gomaxprocs)
  1458  	return l
  1459  }
  1460  
  1461  func (l *GCCPULimiter) Fill() uint64 {
  1462  	return l.limiter.bucket.fill
  1463  }
  1464  
  1465  func (l *GCCPULimiter) Capacity() uint64 {
  1466  	return l.limiter.bucket.capacity
  1467  }
  1468  
  1469  func (l *GCCPULimiter) Overflow() uint64 {
  1470  	return l.limiter.overflow
  1471  }
  1472  
  1473  func (l *GCCPULimiter) Limiting() bool {
  1474  	return l.limiter.limiting()
  1475  }
  1476  
  1477  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1478  	return l.limiter.needUpdate(now)
  1479  }
  1480  
  1481  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1482  	l.limiter.startGCTransition(enableGC, now)
  1483  }
  1484  
  1485  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1486  	l.limiter.finishGCTransition(now)
  1487  }
  1488  
  1489  func (l *GCCPULimiter) Update(now int64) {
  1490  	l.limiter.update(now)
  1491  }
  1492  
  1493  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1494  	l.limiter.addAssistTime(t)
  1495  }
  1496  
  1497  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1498  	l.limiter.resetCapacity(now, nprocs)
  1499  }
  1500  
  1501  const ScavengePercent = scavengePercent
  1502  
  1503  type Scavenger struct {
  1504  	Sleep      func(int64) int64
  1505  	Scavenge   func(uintptr) (uintptr, int64)
  1506  	ShouldStop func() bool
  1507  	GoMaxProcs func() int32
  1508  
  1509  	released  atomic.Uintptr
  1510  	scavenger scavengerState
  1511  	stop      chan<- struct{}
  1512  	done      <-chan struct{}
  1513  }
  1514  
  1515  func (s *Scavenger) Start() {
  1516  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1517  		panic("must populate all stubs")
  1518  	}
  1519  
  1520  	// Install hooks.
  1521  	s.scavenger.sleepStub = s.Sleep
  1522  	s.scavenger.scavenge = s.Scavenge
  1523  	s.scavenger.shouldStop = s.ShouldStop
  1524  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1525  
  1526  	// Start up scavenger goroutine, and wait for it to be ready.
  1527  	stop := make(chan struct{})
  1528  	s.stop = stop
  1529  	done := make(chan struct{})
  1530  	s.done = done
  1531  	go func() {
  1532  		// This should match bgscavenge, loosely.
  1533  		s.scavenger.init()
  1534  		s.scavenger.park()
  1535  		for {
  1536  			select {
  1537  			case <-stop:
  1538  				close(done)
  1539  				return
  1540  			default:
  1541  			}
  1542  			released, workTime := s.scavenger.run()
  1543  			if released == 0 {
  1544  				s.scavenger.park()
  1545  				continue
  1546  			}
  1547  			s.released.Add(released)
  1548  			s.scavenger.sleep(workTime)
  1549  		}
  1550  	}()
  1551  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1552  		panic("timed out waiting for scavenger to get ready")
  1553  	}
  1554  }
  1555  
  1556  // BlockUntilParked blocks until the scavenger parks, or until
  1557  // timeout is exceeded. Returns true if the scavenger parked.
  1558  //
  1559  // Note that in testing, parked means something slightly different.
  1560  // In anger, the scavenger parks to sleep, too, but in testing,
  1561  // it only parks when it actually has no work to do.
  1562  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1563  	// Just spin, waiting for it to park.
  1564  	//
  1565  	// The actual parking process is racy with respect to
  1566  	// wakeups, which is fine, but for testing we need something
  1567  	// a bit more robust.
  1568  	start := nanotime()
  1569  	for nanotime()-start < timeout {
  1570  		lock(&s.scavenger.lock)
  1571  		parked := s.scavenger.parked
  1572  		unlock(&s.scavenger.lock)
  1573  		if parked {
  1574  			return true
  1575  		}
  1576  		Gosched()
  1577  	}
  1578  	return false
  1579  }
  1580  
  1581  // Released returns how many bytes the scavenger released.
  1582  func (s *Scavenger) Released() uintptr {
  1583  	return s.released.Load()
  1584  }
  1585  
  1586  // Wake wakes up a parked scavenger to keep running.
  1587  func (s *Scavenger) Wake() {
  1588  	s.scavenger.wake()
  1589  }
  1590  
  1591  // Stop cleans up the scavenger's resources. The scavenger
  1592  // must be parked for this to work.
  1593  func (s *Scavenger) Stop() {
  1594  	lock(&s.scavenger.lock)
  1595  	parked := s.scavenger.parked
  1596  	unlock(&s.scavenger.lock)
  1597  	if !parked {
  1598  		panic("tried to clean up scavenger that is not parked")
  1599  	}
  1600  	close(s.stop)
  1601  	s.Wake()
  1602  	<-s.done
  1603  }
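
        // Wiring up the harness, roughly as the scavenger tests do (a sketch;
        // the stub behaviors are arbitrary):
        //
        //	s := &runtime.Scavenger{
        //		Sleep:      func(d int64) int64 { return d },
        //		Scavenge:   func(_ uintptr) (uintptr, int64) { return 0, 0 }, // no work: park
        //		ShouldStop: func() bool { return false },
        //		GoMaxProcs: func() int32 { return 1 },
        //	}
        //	s.Start()
        //	s.Wake() // runs once, finds no work, and parks again
        //	s.BlockUntilParked(1e9)
        //	s.Stop()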
  1604  
  1605  type ScavengeIndex struct {
  1606  	i scavengeIndex
  1607  }
  1608  
  1609  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1610  	s := new(ScavengeIndex)
  1611  	s.i.chunks = make([]atomic.Uint8, uintptr(1<<heapAddrBits/pallocChunkBytes/8))
  1612  	s.i.min.Store(int32(min / 8))
  1613  	s.i.max.Store(int32(max / 8))
  1614  	return s
  1615  }
  1616  
  1617  func (s *ScavengeIndex) Find() (ChunkIdx, uint) {
  1618  	ci, off := s.i.find()
  1619  	return ChunkIdx(ci), off
  1620  }
  1621  
  1622  func (s *ScavengeIndex) Mark(base, limit uintptr) {
  1623  	s.i.mark(base, limit)
  1624  }
  1625  
  1626  func (s *ScavengeIndex) Clear(ci ChunkIdx) {
  1627  	s.i.clear(chunkIdx(ci))
  1628  }
  1629  
  1630  const GTrackingPeriod = gTrackingPeriod
  1631  
  1632  var ZeroBase = unsafe.Pointer(&zerobase)
  1633  
  1634  const UserArenaChunkBytes = userArenaChunkBytes
  1635  
  1636  type UserArena struct {
  1637  	arena *userArena
  1638  }
  1639  
  1640  func NewUserArena() *UserArena {
  1641  	return &UserArena{newUserArena()}
  1642  }
  1643  
  1644  func (a *UserArena) New(out *any) {
  1645  	i := efaceOf(out)
  1646  	typ := i._type
  1647  	if typ.kind&kindMask != kindPtr {
  1648  		panic("new result of non-ptr type")
  1649  	}
  1650  	typ = (*ptrtype)(unsafe.Pointer(typ)).elem
  1651  	i.data = a.arena.new(typ)
  1652  }
  1653  
  1654  func (a *UserArena) Slice(sl any, cap int) {
  1655  	a.arena.slice(sl, cap)
  1656  }
  1657  
  1658  func (a *UserArena) Free() {
  1659  	a.arena.free()
  1660  }
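
        // UserArena.New reads the desired pointer type from *out and replaces
        // the value with a pointer into the arena (a sketch):
        //
        //	a := runtime.NewUserArena()
        //	var x any = (*[4]int)(nil) // seed with a typed nil pointer
        //	a.New(&x)                  // x now points into the arena
        //	_ = x.(*[4]int)
        //	a.Free()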
  1661  
  1662  func GlobalWaitingArenaChunks() int {
  1663  	n := 0
  1664  	systemstack(func() {
  1665  		lock(&mheap_.lock)
  1666  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1667  			n++
  1668  		}
  1669  		unlock(&mheap_.lock)
  1670  	})
  1671  	return n
  1672  }
  1673  
  1674  func UserArenaClone[T any](s T) T {
  1675  	return arena_heapify(s).(T)
  1676  }
  1677  
  1678  var AlignUp = alignUp
  1679  
  1680  // BlockUntilEmptyFinalizerQueue blocks until either the finalizer
  1681  // queue is emptied (and the finalizers have executed) or the timeout
  1682  // is reached. Returns true if the finalizer queue was emptied.
  1683  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1684  	start := nanotime()
  1685  	for nanotime()-start < timeout {
  1686  		lock(&finlock)
  1687  		// We know the queue has been drained when both finq is nil
  1688  		// and the finalizer g has stopped executing.
  1689  		empty := finq == nil
  1690  		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
  1691  		unlock(&finlock)
  1692  		if empty {
  1693  			return true
  1694  		}
  1695  		Gosched()
  1696  	}
  1697  	return false
  1698  }
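
        // Typical use after forcing a collection (a sketch):
        //
        //	runtime.GC()
        //	if !runtime.BlockUntilEmptyFinalizerQueue(int64(time.Second)) {
        //		t.Fatal("finalizers did not run in time")
        //	}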
  1699  
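        // FrameStartLine returns the start line of the function in f.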
  1700  func FrameStartLine(f *Frame) int {
  1701  	return f.startLine
  1702  }