github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/goexperiment"
    13  	"internal/goos"
    14  	"runtime/internal/atomic"
    15  	"runtime/internal/sys"
    16  	"unsafe"
    17  )
    18  
    19  var Fadd64 = fadd64
    20  var Fsub64 = fsub64
    21  var Fmul64 = fmul64
    22  var Fdiv64 = fdiv64
    23  var F64to32 = f64to32
    24  var F32to64 = f32to64
    25  var Fcmp64 = fcmp64
    26  var Fintto64 = fintto64
    27  var F64toint = f64toint
    28  
    29  var Entersyscall = entersyscall
    30  var Exitsyscall = exitsyscall
    31  var LockedOSThread = lockedOSThread
    32  var Xadduintptr = atomic.Xadduintptr
    33  
    34  var ReadRandomFailed = &readRandomFailed
    35  
    36  var Fastlog2 = fastlog2
    37  
    38  var Atoi = atoi
    39  var Atoi32 = atoi32
    40  var ParseByteCount = parseByteCount
    41  
    42  var Nanotime = nanotime
    43  var NetpollBreak = netpollBreak
    44  var Usleep = usleep
    45  
    46  var PhysPageSize = physPageSize
    47  var PhysHugePageSize = physHugePageSize
    48  
    49  var NetpollGenericInit = netpollGenericInit
    50  
    51  var Memmove = memmove
    52  var MemclrNoHeapPointers = memclrNoHeapPointers
    53  
    54  var CgoCheckPointer = cgoCheckPointer
    55  
    56  const CrashStackImplemented = crashStackImplemented
    57  
    58  const TracebackInnerFrames = tracebackInnerFrames
    59  const TracebackOuterFrames = tracebackOuterFrames
    60  
    61  var MapKeys = keys
    62  var MapValues = values
    63  
    64  var LockPartialOrder = lockPartialOrder
    65  
    66  type TimeTimer = timeTimer
    67  
    68  type LockRank lockRank
    69  
    70  func (l LockRank) String() string {
    71  	return lockRank(l).String()
    72  }
    73  
    74  const PreemptMSupported = preemptMSupported
    75  
    76  type LFNode struct {
    77  	Next    uint64
    78  	Pushcnt uintptr
    79  }
    80  
    81  func LFStackPush(head *uint64, node *LFNode) {
    82  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    83  }
    84  
    85  func LFStackPop(head *uint64) *LFNode {
    86  	return (*LFNode)((*lfstack)(head).pop())
    87  }
    88  func LFNodeValidate(node *LFNode) {
    89  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    90  }
    91  
    92  func Netpoll(delta int64) {
    93  	systemstack(func() {
    94  		netpoll(delta)
    95  	})
    96  }
    97  
    98  func GCMask(x any) (ret []byte) {
    99  	systemstack(func() {
   100  		ret = getgcmask(x)
   101  	})
   102  	return
   103  }
   104  
   105  func RunSchedLocalQueueTest() {
   106  	pp := new(p)
   107  	gs := make([]g, len(pp.runq))
   108  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   109  	for i := 0; i < len(pp.runq); i++ {
   110  		if g, _ := runqget(pp); g != nil {
   111  			throw("runq is not empty initially")
   112  		}
   113  		for j := 0; j < i; j++ {
   114  			runqput(pp, &gs[i], false)
   115  		}
   116  		for j := 0; j < i; j++ {
   117  			if g, _ := runqget(pp); g != &gs[i] {
   118  				print("bad element at iter ", i, "/", j, "\n")
   119  				throw("bad element")
   120  			}
   121  		}
   122  		if g, _ := runqget(pp); g != nil {
   123  			throw("runq is not empty afterwards")
   124  		}
   125  	}
   126  }
   127  
   128  func RunSchedLocalQueueStealTest() {
   129  	p1 := new(p)
   130  	p2 := new(p)
   131  	gs := make([]g, len(p1.runq))
   132  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   133  	for i := 0; i < len(p1.runq); i++ {
   134  		for j := 0; j < i; j++ {
   135  			gs[j].sig = 0
   136  			runqput(p1, &gs[j], false)
   137  		}
   138  		gp := runqsteal(p2, p1, true)
   139  		s := 0
   140  		if gp != nil {
   141  			s++
   142  			gp.sig++
   143  		}
   144  		for {
   145  			gp, _ = runqget(p2)
   146  			if gp == nil {
   147  				break
   148  			}
   149  			s++
   150  			gp.sig++
   151  		}
   152  		for {
   153  			gp, _ = runqget(p1)
   154  			if gp == nil {
   155  				break
   156  			}
   157  			gp.sig++
   158  		}
   159  		for j := 0; j < i; j++ {
   160  			if gs[j].sig != 1 {
   161  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   162  				throw("bad element")
   163  			}
   164  		}
   165  		if s != i/2 && s != i/2+1 {
   166  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   167  			throw("bad steal")
   168  		}
   169  	}
   170  }
   171  
   172  func RunSchedLocalQueueEmptyTest(iters int) {
   173  	// Test that runq is not spuriously reported as empty.
   174  	// Runq emptiness affects scheduling decisions and spurious emptiness
   175  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   176  	// for an arbitrarily long time).
   177  	done := make(chan bool, 1)
   178  	p := new(p)
   179  	gs := make([]g, 2)
   180  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   181  	ready := new(uint32)
   182  	for i := 0; i < iters; i++ {
   183  		*ready = 0
   184  		next0 := (i & 1) == 0
   185  		next1 := (i & 2) == 0
   186  		runqput(p, &gs[0], next0)
   187  		go func() {
   188  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   189  			}
   190  			if runqempty(p) {
   191  				println("next:", next0, next1)
   192  				throw("queue is empty")
   193  			}
   194  			done <- true
   195  		}()
   196  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   197  		}
   198  		runqput(p, &gs[1], next1)
   199  		runqget(p)
   200  		<-done
   201  		runqget(p)
   202  	}
   203  }
   204  
   205  var (
   206  	StringHash = stringHash
   207  	BytesHash  = bytesHash
   208  	Int32Hash  = int32Hash
   209  	Int64Hash  = int64Hash
   210  	MemHash    = memhash
   211  	MemHash32  = memhash32
   212  	MemHash64  = memhash64
   213  	EfaceHash  = efaceHash
   214  	IfaceHash  = ifaceHash
   215  )
   216  
   217  var UseAeshash = &useAeshash
   218  
   219  func MemclrBytes(b []byte) {
   220  	s := (*slice)(unsafe.Pointer(&b))
   221  	memclrNoHeapPointers(s.array, uintptr(s.len))
   222  }
   223  
   224  const HashLoad = hashLoad
   225  
   226  // entry point for testing
   227  func GostringW(w []uint16) (s string) {
   228  	systemstack(func() {
   229  		s = gostringw(&w[0])
   230  	})
   231  	return
   232  }
   233  
   234  var Open = open
   235  var Close = closefd
   236  var Read = read
   237  var Write = write
   238  
   239  func Envs() []string     { return envs }
   240  func SetEnvs(e []string) { envs = e }
   241  
   242  // For benchmarking.
   243  
   244  // blockWrapper is a wrapper type that ensures a T is placed within a
   245  // large object. This is necessary for safely benchmarking things
   246  // that manipulate the heap bitmap, like heapBitsSetType.
   247  //
   248  // More specifically, allocating threads assume they're the sole writers
   249  // to their span's heap bits, which allows those writes to be non-atomic.
   250  // The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType
   251  // on an existing object in a small object span, we might corrupt that
   252  // span's bitmap with a concurrent byte write to the heap bitmap. Large
   253  // object spans contain exactly one object, so we can be sure no other P
   254  // is going to be allocating from it concurrently, hence this wrapper type
   255  // which ensures we have a T in a large object span.
   256  type blockWrapper[T any] struct {
   257  	value T
   258  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   259  }
   260  
   261  func BenchSetType[T any](n int, resetTimer func()) {
   262  	x := new(blockWrapper[T])
   263  
   264  	// Escape x to ensure it is allocated on the heap, as we are
   265  	// working on the heap bits here.
   266  	Escape(x)
   267  
   268  	// Grab the type.
   269  	var i any = *new(T)
   270  	e := *efaceOf(&i)
   271  	t := e._type
   272  
   273  	// Benchmark setting the type bits for just the internal T of the block.
   274  	benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
   275  }
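
        // A minimal usage sketch (hypothetical; real benchmarks live in the
        // runtime_test package) of how a test might drive BenchSetType:
        //
        //	func BenchmarkSetTypeNode(b *testing.B) {
        //		type node struct {
        //			next *node
        //			val  int
        //		}
        //		runtime.BenchSetType[node](b.N, b.ResetTimer)
        //	}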
   276  
   277  const maxArrayBlockWrapperLen = 32
   278  
   279  // arrayBlockWrapper is like blockWrapper, but the interior value is intended
   280  // to be used as a backing store for a slice.
   281  type arrayBlockWrapper[T any] struct {
   282  	value [maxArrayBlockWrapperLen]T
   283  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   284  }
   285  
   286  // arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior array
   287  // accommodates many more elements.
   288  type arrayLargeBlockWrapper[T any] struct {
   289  	value [1024]T
   290  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   291  }
   292  
   293  func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
    294  	// We have two separate cases here because, for tests on big types
    295  	// with relatively small slices, we want to avoid generating a really
    296  	// big allocation. Such an allocation would likely force a GC and
    297  	// skew the test results.
   298  	var y unsafe.Pointer
   299  	if len <= maxArrayBlockWrapperLen {
   300  		x := new(arrayBlockWrapper[T])
   301  		// Escape x to ensure it is allocated on the heap, as we are
   302  		// working on the heap bits here.
   303  		Escape(x)
   304  		y = unsafe.Pointer(&x.value[0])
   305  	} else {
   306  		x := new(arrayLargeBlockWrapper[T])
   307  		Escape(x)
   308  		y = unsafe.Pointer(&x.value[0])
   309  	}
   310  
   311  	// Grab the type.
   312  	var i any = *new(T)
   313  	e := *efaceOf(&i)
   314  	t := e._type
   315  
   316  	// Benchmark setting the type for a slice created from the array
   317  	// of T within the arrayBlock.
   318  	benchSetType(n, resetTimer, len, y, t)
   319  }
   320  
   321  // benchSetType is the implementation of the BenchSetType* functions.
   322  // x must be len consecutive Ts allocated within a large object span (to
   323  // avoid a race on the heap bitmap).
   324  //
   325  // Note: this function cannot be generic. It would get its type from one of
   326  // its callers (BenchSetType or BenchSetTypeSlice) whose type parameters are
   327  // set by a call in the runtime_test package. That means this function and its
   328  // callers will get instantiated in the package that provides the type argument,
   329  // i.e. runtime_test. However, we call a function on the system stack. In race
   330  // mode the runtime package is usually left uninstrumented because e.g. g0 has
   331  // no valid racectx, but if we're instantiated in the runtime_test package,
   332  // we might accidentally cause runtime code to be incorrectly instrumented.
   333  func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
   334  	// This benchmark doesn't work with the allocheaders experiment. It sets up
   335  	// an elaborate scenario to be able to benchmark the function safely, but doing
    336  	// this work for the allocheaders version of the function would be complex.
   337  	// Just fail instead and rely on the test code making sure we never get here.
   338  	if goexperiment.AllocHeaders {
   339  		panic("called benchSetType with allocheaders experiment enabled")
   340  	}
   341  
   342  	// Compute the input sizes.
   343  	size := t.Size() * uintptr(len)
   344  
   345  	// Validate this function's invariant.
   346  	s := spanOfHeap(uintptr(x))
   347  	if s == nil {
   348  		panic("no heap span for input")
   349  	}
   350  	if s.spanclass.sizeclass() != 0 {
   351  		panic("span is not a large object span")
   352  	}
   353  
   354  	// Round up the size to the size class to make the benchmark a little more
   355  	// realistic. However, validate it, to make sure this is safe.
   356  	allocSize := roundupsize(size, !t.Pointers())
   357  	if s.npages*pageSize < allocSize {
   358  		panic("backing span not large enough for benchmark")
   359  	}
   360  
   361  	// Benchmark heapBitsSetType by calling it in a loop. This is safe because
   362  	// x is in a large object span.
   363  	resetTimer()
   364  	systemstack(func() {
   365  		for i := 0; i < n; i++ {
   366  			heapBitsSetType(uintptr(x), allocSize, size, t)
   367  		}
   368  	})
   369  
   370  	// Make sure x doesn't get freed, since we're taking a uintptr.
   371  	KeepAlive(x)
   372  }
   373  
   374  const PtrSize = goarch.PtrSize
   375  
   376  var ForceGCPeriod = &forcegcperiod
   377  
   378  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   379  // the "environment" traceback level, so later calls to
   380  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   381  func SetTracebackEnv(level string) {
   382  	setTraceback(level)
   383  	traceback_env = traceback_cache
   384  }
   385  
   386  var ReadUnaligned32 = readUnaligned32
   387  var ReadUnaligned64 = readUnaligned64
   388  
   389  func CountPagesInUse() (pagesInUse, counted uintptr) {
   390  	stw := stopTheWorld(stwForTestCountPagesInUse)
   391  
   392  	pagesInUse = mheap_.pagesInUse.Load()
   393  
   394  	for _, s := range mheap_.allspans {
   395  		if s.state.get() == mSpanInUse {
   396  			counted += s.npages
   397  		}
   398  	}
   399  
   400  	startTheWorld(stw)
   401  
   402  	return
   403  }
   404  
   405  func Fastrand() uint32          { return uint32(rand()) }
   406  func Fastrand64() uint64        { return rand() }
   407  func Fastrandn(n uint32) uint32 { return randn(n) }
   408  
   409  type ProfBuf profBuf
   410  
   411  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   412  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   413  }
   414  
   415  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   416  	(*profBuf)(p).write(tag, now, hdr, stk)
   417  }
   418  
   419  const (
   420  	ProfBufBlocking    = profBufBlocking
   421  	ProfBufNonBlocking = profBufNonBlocking
   422  )
   423  
   424  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   425  	return (*profBuf)(p).read(mode)
   426  }
   427  
   428  func (p *ProfBuf) Close() {
   429  	(*profBuf)(p).close()
   430  }
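
        // A minimal usage sketch (hypothetical values) of the ProfBuf wrapper,
        // as a runtime_test caller might exercise it:
        //
        //	p := runtime.NewProfBuf(2, 1000, 10) // 2 header words, 1000 data words, 10 tag slots
        //	var tag unsafe.Pointer
        //	p.Write(&tag, runtime.Nanotime(), []uint64{1, 2}, []uintptr{0x1234, 0x5678})
        //	data, tags, eof := p.Read(runtime.ProfBufNonBlocking)
        //	p.Close()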
   431  
   432  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   433  	stw := stopTheWorld(stwForTestReadMetricsSlow)
   434  
   435  	// Initialize the metrics beforehand because this could
   436  	// allocate and skew the stats.
   437  	metricsLock()
   438  	initMetrics()
   439  
   440  	systemstack(func() {
   441  		// Donate the racectx to g0. readMetricsLocked calls into the race detector
   442  		// via map access.
   443  		getg().racectx = getg().m.curg.racectx
   444  
   445  		// Read the metrics once before in case it allocates and skews the metrics.
   446  		// readMetricsLocked is designed to only allocate the first time it is called
   447  		// with a given slice of samples. In effect, this extra read tests that this
   448  		// remains true, since otherwise the second readMetricsLocked below could
   449  		// allocate before it returns.
   450  		readMetricsLocked(samplesp, len, cap)
   451  
   452  		// Read memstats first. It's going to flush
   453  		// the mcaches which readMetrics does not do, so
   454  		// going the other way around may result in
   455  		// inconsistent statistics.
   456  		readmemstats_m(memStats)
   457  
   458  		// Read metrics again. We need to be sure we're on the
   459  		// system stack with readmemstats_m so that we don't call into
   460  		// the stack allocator and adjust metrics between there and here.
   461  		readMetricsLocked(samplesp, len, cap)
   462  
   463  		// Undo the donation.
   464  		getg().racectx = 0
   465  	})
   466  	metricsUnlock()
   467  
   468  	startTheWorld(stw)
   469  }
   470  
   471  var DoubleCheckReadMemStats = &doubleCheckReadMemStats
   472  
   473  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   474  // MemStats accumulated by scanning the heap.
   475  func ReadMemStatsSlow() (base, slow MemStats) {
   476  	stw := stopTheWorld(stwForTestReadMemStatsSlow)
   477  
   478  	// Run on the system stack to avoid stack growth allocation.
   479  	systemstack(func() {
   480  		// Make sure stats don't change.
   481  		getg().m.mallocing++
   482  
   483  		readmemstats_m(&base)
   484  
   485  		// Initialize slow from base and zero the fields we're
   486  		// recomputing.
   487  		slow = base
   488  		slow.Alloc = 0
   489  		slow.TotalAlloc = 0
   490  		slow.Mallocs = 0
   491  		slow.Frees = 0
   492  		slow.HeapReleased = 0
   493  		var bySize [_NumSizeClasses]struct {
   494  			Mallocs, Frees uint64
   495  		}
   496  
   497  		// Add up current allocations in spans.
   498  		for _, s := range mheap_.allspans {
   499  			if s.state.get() != mSpanInUse {
   500  				continue
   501  			}
   502  			if s.isUnusedUserArenaChunk() {
   503  				continue
   504  			}
   505  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   506  				slow.Mallocs++
   507  				slow.Alloc += uint64(s.elemsize)
   508  			} else {
   509  				slow.Mallocs += uint64(s.allocCount)
   510  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   511  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   512  			}
   513  		}
   514  
   515  		// Add in frees by just reading the stats for those directly.
   516  		var m heapStatsDelta
   517  		memstats.heapStats.unsafeRead(&m)
   518  
   519  		// Collect per-sizeclass free stats.
   520  		var smallFree uint64
   521  		for i := 0; i < _NumSizeClasses; i++ {
   522  			slow.Frees += m.smallFreeCount[i]
   523  			bySize[i].Frees += m.smallFreeCount[i]
   524  			bySize[i].Mallocs += m.smallFreeCount[i]
   525  			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
   526  		}
   527  		slow.Frees += m.tinyAllocCount + m.largeFreeCount
   528  		slow.Mallocs += slow.Frees
   529  
   530  		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
   531  
   532  		for i := range slow.BySize {
   533  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   534  			slow.BySize[i].Frees = bySize[i].Frees
   535  		}
   536  
   537  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   538  			chunk := mheap_.pages.tryChunkOf(i)
   539  			if chunk == nil {
   540  				continue
   541  			}
   542  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   543  			slow.HeapReleased += uint64(pg) * pageSize
   544  		}
   545  		for _, p := range allp {
   546  			pg := sys.OnesCount64(p.pcache.scav)
   547  			slow.HeapReleased += uint64(pg) * pageSize
   548  		}
   549  
   550  		getg().m.mallocing--
   551  	})
   552  
   553  	startTheWorld(stw)
   554  	return
   555  }
   556  
   557  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   558  // and verifies that unwinding the new stack doesn't crash, even if the old
   559  // stack has been freed or reused (simulated via poisoning).
   560  func ShrinkStackAndVerifyFramePointers() {
   561  	before := stackPoisonCopy
   562  	defer func() { stackPoisonCopy = before }()
   563  	stackPoisonCopy = 1
   564  
   565  	gp := getg()
   566  	systemstack(func() {
   567  		shrinkstack(gp)
   568  	})
   569  	// If our new stack contains frame pointers into the old stack, this will
   570  	// crash because the old stack has been poisoned.
   571  	FPCallers(make([]uintptr, 1024))
   572  }
   573  
   574  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   575  // stderr, and blocks in a stack containing
   576  // "runtime.blockOnSystemStackInternal".
   577  func BlockOnSystemStack() {
   578  	systemstack(blockOnSystemStackInternal)
   579  }
   580  
   581  func blockOnSystemStackInternal() {
   582  	print("x\n")
   583  	lock(&deadlock)
   584  	lock(&deadlock)
   585  }
   586  
   587  type RWMutex struct {
   588  	rw rwmutex
   589  }
   590  
   591  func (rw *RWMutex) Init() {
   592  	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
   593  }
   594  
   595  func (rw *RWMutex) RLock() {
   596  	rw.rw.rlock()
   597  }
   598  
   599  func (rw *RWMutex) RUnlock() {
   600  	rw.rw.runlock()
   601  }
   602  
   603  func (rw *RWMutex) Lock() {
   604  	rw.rw.lock()
   605  }
   606  
   607  func (rw *RWMutex) Unlock() {
   608  	rw.rw.unlock()
   609  }
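
        // A minimal usage sketch: the wrapper behaves like sync.RWMutex, except
        // that Init must be called first to set up lock ranking.
        //
        //	var mu runtime.RWMutex
        //	mu.Init()
        //	mu.RLock()
        //	mu.RUnlock()
        //	mu.Lock()
        //	mu.Unlock()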
   610  
   611  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   612  
   613  func MapBucketsCount(m map[int]int) int {
   614  	h := *(**hmap)(unsafe.Pointer(&m))
   615  	return 1 << h.B
   616  }
   617  
   618  func MapBucketsPointerIsNil(m map[int]int) bool {
   619  	h := *(**hmap)(unsafe.Pointer(&m))
   620  	return h.buckets == nil
   621  }
   622  
   623  func OverLoadFactor(count int, B uint8) bool {
   624  	return overLoadFactor(count, B)
   625  }
   626  
   627  func LockOSCounts() (external, internal uint32) {
   628  	gp := getg()
   629  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   630  		if gp.lockedm != 0 {
   631  			panic("lockedm on non-locked goroutine")
   632  		}
   633  	} else {
   634  		if gp.lockedm == 0 {
   635  			panic("nil lockedm on locked goroutine")
   636  		}
   637  	}
   638  	return gp.m.lockedExt, gp.m.lockedInt
   639  }
   640  
   641  //go:noinline
   642  func TracebackSystemstack(stk []uintptr, i int) int {
   643  	if i == 0 {
   644  		pc, sp := getcallerpc(), getcallersp()
   645  		var u unwinder
   646  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   647  		return tracebackPCs(&u, 0, stk)
   648  	}
   649  	n := 0
   650  	systemstack(func() {
   651  		n = TracebackSystemstack(stk, i-1)
   652  	})
   653  	return n
   654  }
   655  
   656  func KeepNArenaHints(n int) {
   657  	hint := mheap_.arenaHints
   658  	for i := 1; i < n; i++ {
   659  		hint = hint.next
   660  		if hint == nil {
   661  			return
   662  		}
   663  	}
   664  	hint.next = nil
   665  }
   666  
   667  // MapNextArenaHint reserves a page at the next arena growth hint,
   668  // preventing the arena from growing there, and returns the range of
   669  // addresses that are no longer viable.
   670  //
   671  // This may fail to reserve memory. If it fails, it still returns the
   672  // address range it attempted to reserve.
   673  func MapNextArenaHint() (start, end uintptr, ok bool) {
   674  	hint := mheap_.arenaHints
   675  	addr := hint.addr
   676  	if hint.down {
   677  		start, end = addr-heapArenaBytes, addr
   678  		addr -= physPageSize
   679  	} else {
   680  		start, end = addr, addr+heapArenaBytes
   681  	}
   682  	got := sysReserve(unsafe.Pointer(addr), physPageSize)
   683  	ok = (addr == uintptr(got))
   684  	if !ok {
   685  		// We were unable to get the requested reservation.
   686  		// Release what we did get and fail.
   687  		sysFreeOS(got, physPageSize)
   688  	}
   689  	return
   690  }
   691  
   692  func GetNextArenaHint() uintptr {
   693  	return mheap_.arenaHints.addr
   694  }
   695  
   696  type G = g
   697  
   698  type Sudog = sudog
   699  
   700  func Getg() *G {
   701  	return getg()
   702  }
   703  
   704  func Goid() uint64 {
   705  	return getg().goid
   706  }
   707  
   708  func GIsWaitingOnMutex(gp *G) bool {
   709  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   710  }
   711  
   712  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   713  
   714  //go:noinline
   715  func PanicForTesting(b []byte, i int) byte {
   716  	return unexportedPanicForTesting(b, i)
   717  }
   718  
   719  //go:noinline
   720  func unexportedPanicForTesting(b []byte, i int) byte {
   721  	return b[i]
   722  }
   723  
   724  func G0StackOverflow() {
   725  	systemstack(func() {
   726  		g0 := getg()
   727  		sp := getcallersp()
    728  		// The stack bounds for the g0 stack are not always precise.
    729  		// Use an artificially small stack to trigger a stack overflow
    730  		// without actually running out of the system stack (which could segfault).
   731  		g0.stack.lo = sp - 4096 - stackSystem
   732  		g0.stackguard0 = g0.stack.lo + stackGuard
   733  		g0.stackguard1 = g0.stackguard0
   734  
   735  		stackOverflow(nil)
   736  	})
   737  }
   738  
   739  func stackOverflow(x *byte) {
   740  	var buf [256]byte
   741  	stackOverflow(&buf[0])
   742  }
   743  
   744  func MapTombstoneCheck(m map[int]int) {
   745  	// Make sure emptyOne and emptyRest are distributed correctly.
   746  	// We should have a series of filled and emptyOne cells, followed by
   747  	// a series of emptyRest cells.
   748  	h := *(**hmap)(unsafe.Pointer(&m))
   749  	i := any(m)
   750  	t := *(**maptype)(unsafe.Pointer(&i))
   751  
   752  	for x := 0; x < 1<<h.B; x++ {
   753  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
   754  		n := 0
   755  		for b := b0; b != nil; b = b.overflow(t) {
   756  			for i := 0; i < abi.MapBucketCount; i++ {
   757  				if b.tophash[i] != emptyRest {
   758  					n++
   759  				}
   760  			}
   761  		}
   762  		k := 0
   763  		for b := b0; b != nil; b = b.overflow(t) {
   764  			for i := 0; i < abi.MapBucketCount; i++ {
   765  				if k < n && b.tophash[i] == emptyRest {
   766  					panic("early emptyRest")
   767  				}
   768  				if k >= n && b.tophash[i] != emptyRest {
   769  					panic("late non-emptyRest")
   770  				}
   771  				if k == n-1 && b.tophash[i] == emptyOne {
   772  					panic("last non-emptyRest entry is emptyOne")
   773  				}
   774  				k++
   775  			}
   776  		}
   777  	}
   778  }
   779  
   780  func RunGetgThreadSwitchTest() {
    781  	// Test that getg works correctly across a thread switch.
   782  	// With gccgo, if we generate getg inlined, the backend
   783  	// may cache the address of the TLS variable, which
   784  	// will become invalid after a thread switch. This test
   785  	// checks that the bad caching doesn't happen.
   786  
   787  	ch := make(chan int)
   788  	go func(ch chan int) {
   789  		ch <- 5
   790  		LockOSThread()
   791  	}(ch)
   792  
   793  	g1 := getg()
   794  
   795  	// Block on a receive. This is likely to get us a thread
   796  	// switch. If we yield to the sender goroutine, it will
   797  	// lock the thread, forcing us to resume on a different
   798  	// thread.
   799  	<-ch
   800  
   801  	g2 := getg()
   802  	if g1 != g2 {
   803  		panic("g1 != g2")
   804  	}
   805  
   806  	// Also test getg after some control flow, as the
   807  	// backend is sensitive to control flow.
   808  	g3 := getg()
   809  	if g1 != g3 {
   810  		panic("g1 != g3")
   811  	}
   812  }
   813  
   814  const (
   815  	PageSize         = pageSize
   816  	PallocChunkPages = pallocChunkPages
   817  	PageAlloc64Bit   = pageAlloc64Bit
   818  	PallocSumBytes   = pallocSumBytes
   819  )
   820  
   821  // Expose pallocSum for testing.
   822  type PallocSum pallocSum
   823  
   824  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   825  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   826  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   827  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   828  
   829  // Expose pallocBits for testing.
   830  type PallocBits pallocBits
   831  
   832  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   833  	return (*pallocBits)(b).find(npages, searchIdx)
   834  }
   835  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   836  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   837  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   838  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   839  
   840  // SummarizeSlow is a slow but more obviously correct implementation
   841  // of (*pallocBits).summarize. Used for testing.
   842  func SummarizeSlow(b *PallocBits) PallocSum {
   843  	var start, most, end uint
   844  
   845  	const N = uint(len(b)) * 64
   846  	for start < N && (*pageBits)(b).get(start) == 0 {
   847  		start++
   848  	}
   849  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   850  		end++
   851  	}
   852  	run := uint(0)
   853  	for i := uint(0); i < N; i++ {
   854  		if (*pageBits)(b).get(i) == 0 {
   855  			run++
   856  		} else {
   857  			run = 0
   858  		}
   859  		most = max(most, run)
   860  	}
   861  	return PackPallocSum(start, most, end)
   862  }
   863  
   864  // Expose non-trivial helpers for testing.
   865  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   866  
    867  // DiffPallocBits returns the set of bit ranges in which the two
    868  // PallocBits differ.
   869  func DiffPallocBits(a, b *PallocBits) []BitRange {
   870  	ba := (*pageBits)(a)
   871  	bb := (*pageBits)(b)
   872  
   873  	var d []BitRange
   874  	base, size := uint(0), uint(0)
   875  	for i := uint(0); i < uint(len(ba))*64; i++ {
   876  		if ba.get(i) != bb.get(i) {
   877  			if size == 0 {
   878  				base = i
   879  			}
   880  			size++
   881  		} else {
   882  			if size != 0 {
   883  				d = append(d, BitRange{base, size})
   884  			}
   885  			size = 0
   886  		}
   887  	}
   888  	if size != 0 {
   889  		d = append(d, BitRange{base, size})
   890  	}
   891  	return d
   892  }
   893  
   894  // StringifyPallocBits gets the bits in the bit range r from b,
   895  // and returns a string containing the bits as ASCII 0 and 1
   896  // characters.
   897  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   898  	str := ""
   899  	for j := r.I; j < r.I+r.N; j++ {
   900  		if (*pageBits)(b).get(j) != 0 {
   901  			str += "1"
   902  		} else {
   903  			str += "0"
   904  		}
   905  	}
   906  	return str
   907  }
   908  
   909  // Expose pallocData for testing.
   910  type PallocData pallocData
   911  
   912  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   913  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   914  }
   915  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   916  func (d *PallocData) ScavengedSetRange(i, n uint) {
   917  	(*pallocData)(d).scavenged.setRange(i, n)
   918  }
   919  func (d *PallocData) PallocBits() *PallocBits {
   920  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   921  }
   922  func (d *PallocData) Scavenged() *PallocBits {
   923  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   924  }
   925  
   926  // Expose fillAligned for testing.
   927  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   928  
   929  // Expose pageCache for testing.
   930  type PageCache pageCache
   931  
   932  const PageCachePages = pageCachePages
   933  
   934  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   935  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   936  }
   937  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   938  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   939  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   940  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   941  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   942  	return (*pageCache)(c).alloc(npages)
   943  }
   944  func (c *PageCache) Flush(s *PageAlloc) {
   945  	cp := (*pageCache)(c)
   946  	sp := (*pageAlloc)(s)
   947  
   948  	systemstack(func() {
   949  		// None of the tests need any higher-level locking, so we just
   950  		// take the lock internally.
   951  		lock(sp.mheapLock)
   952  		cp.flush(sp)
   953  		unlock(sp.mheapLock)
   954  	})
   955  }
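
        // A minimal usage sketch (hypothetical base address): build a cache whose
        // two lowest pages are free and scavenged, then allocate one page from it.
        //
        //	c := runtime.NewPageCache(base, 0x3, 0x3) // base is a page-aligned address chosen by the test
        //	addr, scav := c.Alloc(1)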
   956  
   957  // Expose chunk index type.
   958  type ChunkIdx chunkIdx
   959  
    960  // Expose pageAlloc for testing. Note that because pageAlloc is
    961  // not in the heap, neither is PageAlloc.
   962  type PageAlloc pageAlloc
   963  
   964  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   965  	pp := (*pageAlloc)(p)
   966  
   967  	var addr, scav uintptr
   968  	systemstack(func() {
   969  		// None of the tests need any higher-level locking, so we just
   970  		// take the lock internally.
   971  		lock(pp.mheapLock)
   972  		addr, scav = pp.alloc(npages)
   973  		unlock(pp.mheapLock)
   974  	})
   975  	return addr, scav
   976  }
   977  func (p *PageAlloc) AllocToCache() PageCache {
   978  	pp := (*pageAlloc)(p)
   979  
   980  	var c PageCache
   981  	systemstack(func() {
   982  		// None of the tests need any higher-level locking, so we just
   983  		// take the lock internally.
   984  		lock(pp.mheapLock)
   985  		c = PageCache(pp.allocToCache())
   986  		unlock(pp.mheapLock)
   987  	})
   988  	return c
   989  }
   990  func (p *PageAlloc) Free(base, npages uintptr) {
   991  	pp := (*pageAlloc)(p)
   992  
   993  	systemstack(func() {
   994  		// None of the tests need any higher-level locking, so we just
   995  		// take the lock internally.
   996  		lock(pp.mheapLock)
   997  		pp.free(base, npages)
   998  		unlock(pp.mheapLock)
   999  	})
  1000  }
  1001  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
  1002  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
  1003  }
  1004  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
  1005  	pp := (*pageAlloc)(p)
  1006  	systemstack(func() {
  1007  		r = pp.scavenge(nbytes, nil, true)
  1008  	})
  1009  	return
  1010  }
  1011  func (p *PageAlloc) InUse() []AddrRange {
  1012  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
  1013  	for _, r := range p.inUse.ranges {
  1014  		ranges = append(ranges, AddrRange{r})
  1015  	}
  1016  	return ranges
  1017  }
  1018  
  1019  // Returns nil if the PallocData's L2 is missing.
  1020  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
  1021  	ci := chunkIdx(i)
  1022  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
  1023  }
  1024  
  1025  // AddrRange is a wrapper around addrRange for testing.
  1026  type AddrRange struct {
  1027  	addrRange
  1028  }
  1029  
  1030  // MakeAddrRange creates a new address range.
  1031  func MakeAddrRange(base, limit uintptr) AddrRange {
  1032  	return AddrRange{makeAddrRange(base, limit)}
  1033  }
  1034  
  1035  // Base returns the virtual base address of the address range.
  1036  func (a AddrRange) Base() uintptr {
  1037  	return a.addrRange.base.addr()
  1038  }
  1039  
   1040  // Limit returns the virtual address of the limit of the address range.
  1041  func (a AddrRange) Limit() uintptr {
  1042  	return a.addrRange.limit.addr()
  1043  }
  1044  
  1045  // Equals returns true if the two address ranges are exactly equal.
  1046  func (a AddrRange) Equals(b AddrRange) bool {
  1047  	return a == b
  1048  }
  1049  
  1050  // Size returns the size in bytes of the address range.
  1051  func (a AddrRange) Size() uintptr {
  1052  	return a.addrRange.size()
  1053  }
  1054  
  1055  // testSysStat is the sysStat passed to test versions of various
  1056  // runtime structures. We do actually have to keep track of this
  1057  // because otherwise memstats.mappedReady won't actually line up
  1058  // with other stats in the runtime during tests.
  1059  var testSysStat = &memstats.other_sys
  1060  
  1061  // AddrRanges is a wrapper around addrRanges for testing.
  1062  type AddrRanges struct {
  1063  	addrRanges
  1064  	mutable bool
  1065  }
  1066  
  1067  // NewAddrRanges creates a new empty addrRanges.
  1068  //
  1069  // Note that this initializes addrRanges just like in the
  1070  // runtime, so its memory is persistentalloc'd. Call this
  1071  // function sparingly since the memory it allocates is
  1072  // leaked.
  1073  //
  1074  // This AddrRanges is mutable, so we can test methods like
  1075  // Add.
  1076  func NewAddrRanges() AddrRanges {
  1077  	r := addrRanges{}
  1078  	r.init(testSysStat)
  1079  	return AddrRanges{r, true}
  1080  }
  1081  
  1082  // MakeAddrRanges creates a new addrRanges populated with
  1083  // the ranges in a.
  1084  //
  1085  // The returned AddrRanges is immutable, so methods like
  1086  // Add will fail.
  1087  func MakeAddrRanges(a ...AddrRange) AddrRanges {
  1088  	// Methods that manipulate the backing store of addrRanges.ranges should
  1089  	// not be used on the result from this function (e.g. add) since they may
  1090  	// trigger reallocation. That would normally be fine, except the new
  1091  	// backing store won't come from the heap, but from persistentalloc, so
  1092  	// we'll leak some memory implicitly.
  1093  	ranges := make([]addrRange, 0, len(a))
  1094  	total := uintptr(0)
  1095  	for _, r := range a {
  1096  		ranges = append(ranges, r.addrRange)
  1097  		total += r.Size()
  1098  	}
  1099  	return AddrRanges{addrRanges{
  1100  		ranges:     ranges,
  1101  		totalBytes: total,
  1102  		sysStat:    testSysStat,
  1103  	}, false}
  1104  }
  1105  
  1106  // Ranges returns a copy of the ranges described by the
  1107  // addrRanges.
  1108  func (a *AddrRanges) Ranges() []AddrRange {
  1109  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
  1110  	for _, r := range a.addrRanges.ranges {
  1111  		result = append(result, AddrRange{r})
  1112  	}
  1113  	return result
  1114  }
  1115  
  1116  // FindSucc returns the successor to base. See addrRanges.findSucc
  1117  // for more details.
  1118  func (a *AddrRanges) FindSucc(base uintptr) int {
  1119  	return a.findSucc(base)
  1120  }
  1121  
  1122  // Add adds a new AddrRange to the AddrRanges.
  1123  //
   1124  // The AddrRanges must be mutable (i.e. created by NewAddrRanges),
  1125  // otherwise this method will throw.
  1126  func (a *AddrRanges) Add(r AddrRange) {
  1127  	if !a.mutable {
  1128  		throw("attempt to mutate immutable AddrRanges")
  1129  	}
  1130  	a.add(r.addrRange)
  1131  }
  1132  
  1133  // TotalBytes returns the totalBytes field of the addrRanges.
  1134  func (a *AddrRanges) TotalBytes() uintptr {
  1135  	return a.addrRanges.totalBytes
  1136  }
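
        // A minimal usage sketch (base chosen by the test): only an AddrRanges
        // built by NewAddrRanges may be mutated with Add.
        //
        //	a := runtime.NewAddrRanges()
        //	a.Add(runtime.MakeAddrRange(base, base+16*runtime.PageSize))
        //	i := a.FindSucc(base + runtime.PageSize)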
  1137  
  1138  // BitRange represents a range over a bitmap.
  1139  type BitRange struct {
  1140  	I, N uint // bit index and length in bits
  1141  }
  1142  
  1143  // NewPageAlloc creates a new page allocator for testing and
  1144  // initializes it with the scav and chunks maps. Each key in these maps
  1145  // represents a chunk index and each value is a series of bit ranges to
  1146  // set within each bitmap's chunk.
  1147  //
  1148  // The initialization of the pageAlloc preserves the invariant that if a
  1149  // scavenged bit is set the alloc bit is necessarily unset, so some
  1150  // of the bits described by scav may be cleared in the final bitmap if
  1151  // ranges in chunks overlap with them.
  1152  //
  1153  // scav is optional, and if nil, the scavenged bitmap will be cleared
  1154  // (as opposed to all 1s, which it usually is). Furthermore, every
  1155  // chunk index in scav must appear in chunks; ones that do not are
  1156  // ignored.
  1157  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
  1158  	p := new(pageAlloc)
  1159  
  1160  	// We've got an entry, so initialize the pageAlloc.
  1161  	p.init(new(mutex), testSysStat, true)
  1162  	lockInit(p.mheapLock, lockRankMheap)
  1163  	for i, init := range chunks {
  1164  		addr := chunkBase(chunkIdx(i))
  1165  
  1166  		// Mark the chunk's existence in the pageAlloc.
  1167  		systemstack(func() {
  1168  			lock(p.mheapLock)
  1169  			p.grow(addr, pallocChunkBytes)
  1170  			unlock(p.mheapLock)
  1171  		})
  1172  
  1173  		// Initialize the bitmap and update pageAlloc metadata.
  1174  		ci := chunkIndex(addr)
  1175  		chunk := p.chunkOf(ci)
  1176  
   1177  		// Clear all the scavenged bits that p.grow set above.
  1178  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1179  
  1180  		// Simulate the allocation and subsequent free of all pages in
   1181  		// the chunk for the scavenge index. This leaves the index in a state
   1182  		// equivalent to all pages within the chunk being free.
  1183  		p.scav.index.alloc(ci, pallocChunkPages)
  1184  		p.scav.index.free(ci, 0, pallocChunkPages)
  1185  
  1186  		// Apply scavenge state if applicable.
  1187  		if scav != nil {
  1188  			if scvg, ok := scav[i]; ok {
  1189  				for _, s := range scvg {
  1190  					// Ignore the case of s.N == 0. setRange doesn't handle
  1191  					// it and it's a no-op anyway.
  1192  					if s.N != 0 {
  1193  						chunk.scavenged.setRange(s.I, s.N)
  1194  					}
  1195  				}
  1196  			}
  1197  		}
  1198  
  1199  		// Apply alloc state.
  1200  		for _, s := range init {
  1201  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1202  			// it and it's a no-op anyway.
  1203  			if s.N != 0 {
  1204  				chunk.allocRange(s.I, s.N)
  1205  
  1206  				// Make sure the scavenge index is updated.
  1207  				p.scav.index.alloc(ci, s.N)
  1208  			}
  1209  		}
  1210  
  1211  		// Update heap metadata for the allocRange calls above.
  1212  		systemstack(func() {
  1213  			lock(p.mheapLock)
  1214  			p.update(addr, pallocChunkPages, false, false)
  1215  			unlock(p.mheapLock)
  1216  		})
  1217  	}
  1218  
  1219  	return (*PageAlloc)(p)
  1220  }
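
        // A minimal sketch of how a test might drive this helper (names unqualified
        // for brevity): one chunk at BaseChunkIdx with every page free.
        //
        //	p := NewPageAlloc(map[ChunkIdx][]BitRange{
        //		BaseChunkIdx: {},
        //	}, nil)
        //	defer FreePageAlloc(p)
        //	addr, scav := p.Alloc(1)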
  1221  
  1222  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1223  // is called the pageAlloc may no longer be used. The object itself will be
  1224  // collected by the garbage collector once it is no longer live.
  1225  func FreePageAlloc(pp *PageAlloc) {
  1226  	p := (*pageAlloc)(pp)
  1227  
  1228  	// Free all the mapped space for the summary levels.
  1229  	if pageAlloc64Bit != 0 {
  1230  		for l := 0; l < summaryLevels; l++ {
  1231  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1232  		}
  1233  	} else {
  1234  		resSize := uintptr(0)
  1235  		for _, s := range p.summary {
  1236  			resSize += uintptr(cap(s)) * pallocSumBytes
  1237  		}
  1238  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1239  	}
  1240  
  1241  	// Free extra data structures.
  1242  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1243  
  1244  	// Subtract back out whatever we mapped for the summaries.
  1245  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1246  	// (and in anger should actually be accounted for), and there's no other
  1247  	// way to figure out how much we actually mapped.
  1248  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1249  	testSysStat.add(-int64(p.summaryMappedReady))
  1250  
  1251  	// Free the mapped space for chunks.
  1252  	for i := range p.chunks {
  1253  		if x := p.chunks[i]; x != nil {
  1254  			p.chunks[i] = nil
  1255  			// This memory comes from sysAlloc and will always be page-aligned.
  1256  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1257  		}
  1258  	}
  1259  }
  1260  
  1261  // BaseChunkIdx is a convenient chunkIdx value which works on both
  1262  // 64 bit and 32 bit platforms, allowing the tests to share code
  1263  // between the two.
  1264  //
  1265  // This should not be higher than 0x100*pallocChunkBytes to support
  1266  // mips and mipsle, which only have 31-bit address spaces.
  1267  var BaseChunkIdx = func() ChunkIdx {
  1268  	var prefix uintptr
  1269  	if pageAlloc64Bit != 0 {
  1270  		prefix = 0xc000
  1271  	} else {
  1272  		prefix = 0x100
  1273  	}
  1274  	baseAddr := prefix * pallocChunkBytes
  1275  	if goos.IsAix != 0 {
  1276  		baseAddr += arenaBaseOffset
  1277  	}
  1278  	return ChunkIdx(chunkIndex(baseAddr))
  1279  }()
  1280  
  1281  // PageBase returns an address given a chunk index and a page index
  1282  // relative to that chunk.
  1283  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1284  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1285  }
  1286  
  1287  type BitsMismatch struct {
  1288  	Base      uintptr
  1289  	Got, Want uint64
  1290  }
  1291  
  1292  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1293  	ok = true
  1294  
  1295  	// Run on the system stack to avoid stack growth allocation.
  1296  	systemstack(func() {
  1297  		getg().m.mallocing++
  1298  
  1299  		// Lock so that we can safely access the bitmap.
  1300  		lock(&mheap_.lock)
  1301  	chunkLoop:
  1302  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1303  			chunk := mheap_.pages.tryChunkOf(i)
  1304  			if chunk == nil {
  1305  				continue
  1306  			}
  1307  			for j := 0; j < pallocChunkPages/64; j++ {
  1308  				// Run over each 64-bit bitmap section and ensure
  1309  				// scavenged is being cleared properly on allocation.
  1310  				// If a used bit and scavenged bit are both set, that's
  1311  				// an error, and could indicate a larger problem, or
  1312  				// an accounting problem.
  1313  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1314  				got := chunk.scavenged[j]
  1315  				if want != got {
  1316  					ok = false
  1317  					if n >= len(mismatches) {
  1318  						break chunkLoop
  1319  					}
  1320  					mismatches[n] = BitsMismatch{
  1321  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
  1322  						Got:  got,
  1323  						Want: want,
  1324  					}
  1325  					n++
  1326  				}
  1327  			}
  1328  		}
  1329  		unlock(&mheap_.lock)
  1330  
  1331  		getg().m.mallocing--
  1332  	})
  1333  	return
  1334  }
  1335  
  1336  func PageCachePagesLeaked() (leaked uintptr) {
  1337  	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
  1338  
  1339  	// Walk over destroyed Ps and look for unflushed caches.
  1340  	deadp := allp[len(allp):cap(allp)]
  1341  	for _, p := range deadp {
  1342  		// Since we're going past len(allp) we may see nil Ps.
  1343  		// Just ignore them.
  1344  		if p != nil {
  1345  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1346  		}
  1347  	}
  1348  
  1349  	startTheWorld(stw)
  1350  	return
  1351  }
  1352  
  1353  type Mutex = mutex
  1354  
  1355  var Lock = lock
  1356  var Unlock = unlock
  1357  
  1358  var MutexContended = mutexContended
  1359  
  1360  func SemRootLock(addr *uint32) *mutex {
  1361  	root := semtable.rootFor(addr)
  1362  	return &root.lock
  1363  }
  1364  
  1365  var Semacquire = semacquire
  1366  var Semrelease1 = semrelease1
  1367  
  1368  func SemNwait(addr *uint32) uint32 {
  1369  	root := semtable.rootFor(addr)
  1370  	return root.nwait.Load()
  1371  }
  1372  
  1373  const SemTableSize = semTabSize
  1374  
  1375  // SemTable is a wrapper around semTable exported for testing.
  1376  type SemTable struct {
  1377  	semTable
  1378  }
  1379  
  1380  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1381  func (t *SemTable) Enqueue(addr *uint32) {
  1382  	s := acquireSudog()
  1383  	s.releasetime = 0
  1384  	s.acquiretime = 0
  1385  	s.ticket = 0
  1386  	t.semTable.rootFor(addr).queue(addr, s, false)
  1387  }
  1388  
  1389  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1390  //
  1391  // Returns true if there actually was a waiter to be dequeued.
  1392  func (t *SemTable) Dequeue(addr *uint32) bool {
  1393  	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
  1394  	if s != nil {
  1395  		releaseSudog(s)
  1396  		return true
  1397  	}
  1398  	return false
  1399  }
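
        // A minimal usage sketch: enqueue a waiter on an address and dequeue it again.
        //
        //	var t runtime.SemTable
        //	var sem uint32
        //	t.Enqueue(&sem)
        //	ok := t.Dequeue(&sem) // true, since a waiter was enqueued above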
  1400  
  1401  // mspan wrapper for testing.
  1402  type MSpan mspan
  1403  
  1404  // Allocate an mspan for testing.
  1405  func AllocMSpan() *MSpan {
  1406  	var s *mspan
  1407  	systemstack(func() {
  1408  		lock(&mheap_.lock)
  1409  		s = (*mspan)(mheap_.spanalloc.alloc())
  1410  		unlock(&mheap_.lock)
  1411  	})
  1412  	return (*MSpan)(s)
  1413  }
  1414  
  1415  // Free an allocated mspan.
  1416  func FreeMSpan(s *MSpan) {
  1417  	systemstack(func() {
  1418  		lock(&mheap_.lock)
  1419  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1420  		unlock(&mheap_.lock)
  1421  	})
  1422  }
  1423  
  1424  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1425  	s := (*mspan)(ms)
  1426  	s.nelems = uint16(len(bits) * 8)
  1427  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1428  	result := s.countAlloc()
  1429  	s.gcmarkBits = nil
  1430  	return result
  1431  }
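
        // A minimal usage sketch: count set mark bits against a throwaway span.
        //
        //	s := runtime.AllocMSpan()
        //	defer runtime.FreeMSpan(s)
        //	bits := make([]byte, 2)
        //	bits[0] = 0x0f
        //	n := runtime.MSpanCountAlloc(s, bits) // n == 4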
  1432  
  1433  const (
  1434  	TimeHistSubBucketBits = timeHistSubBucketBits
  1435  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1436  	TimeHistNumBuckets    = timeHistNumBuckets
  1437  	TimeHistMinBucketBits = timeHistMinBucketBits
  1438  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1439  )
  1440  
  1441  type TimeHistogram timeHistogram
  1442  
   1443  // Count returns the count for the given bucket, subBucket indices.
   1444  // Returns true if the bucket was valid, otherwise returns the count
  1445  // for the overflow bucket if bucket > 0 or the underflow bucket if
  1446  // bucket < 0, and false.
  1447  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1448  	t := (*timeHistogram)(th)
  1449  	if bucket < 0 {
  1450  		return t.underflow.Load(), false
  1451  	}
  1452  	i := bucket*TimeHistNumSubBuckets + subBucket
  1453  	if i >= len(t.counts) {
  1454  		return t.overflow.Load(), false
  1455  	}
  1456  	return t.counts[i].Load(), true
  1457  }
  1458  
  1459  func (th *TimeHistogram) Record(duration int64) {
  1460  	(*timeHistogram)(th).record(duration)
  1461  }
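
        // A minimal usage sketch (indices chosen arbitrarily): record a duration
        // and read back one bucket's count.
        //
        //	var h runtime.TimeHistogram
        //	h.Record(100) // nanoseconds
        //	c, ok := h.Count(0, 0)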
  1462  
  1463  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1464  
  1465  func SetIntArgRegs(a int) int {
  1466  	lock(&finlock)
  1467  	old := intArgRegs
  1468  	if a >= 0 {
  1469  		intArgRegs = a
  1470  	}
  1471  	unlock(&finlock)
  1472  	return old
  1473  }
  1474  
  1475  func FinalizerGAsleep() bool {
  1476  	return fingStatus.Load()&fingWait != 0
  1477  }
  1478  
  1479  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1480  // extra layer of call, since then there's a return before the "real"
  1481  // next call.
  1482  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1483  
  1484  // For GCTestIsReachable, it's important that we do this as a call so
  1485  // escape analysis can see through it.
  1486  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1487  	return gcTestIsReachable(ptrs...)
  1488  }
  1489  
  1490  // For GCTestPointerClass, it's important that we do this as a call so
  1491  // escape analysis can see through it.
  1492  //
  1493  // This is nosplit because gcTestPointerClass is.
  1494  //
  1495  //go:nosplit
  1496  func GCTestPointerClass(p unsafe.Pointer) string {
  1497  	return gcTestPointerClass(p)
  1498  }
  1499  
  1500  const Raceenabled = raceenabled
  1501  
  1502  const (
  1503  	GCBackgroundUtilization            = gcBackgroundUtilization
  1504  	GCGoalUtilization                  = gcGoalUtilization
  1505  	DefaultHeapMinimum                 = defaultHeapMinimum
  1506  	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
  1507  	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
  1508  )
  1509  
  1510  type GCController struct {
  1511  	gcControllerState
  1512  }
  1513  
  1514  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1515  	// Force the controller to escape. We're going to
  1516  	// do 64-bit atomics on it, and if it gets stack-allocated
  1517  	// on a 32-bit architecture, it may get allocated unaligned
  1518  	// space.
  1519  	g := Escape(new(GCController))
  1520  	g.gcControllerState.test = true // Mark it as a test copy.
  1521  	g.init(int32(gcPercent), memoryLimit)
  1522  	return g
  1523  }
  1524  
  1525  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1526  	trigger, _ := c.trigger()
  1527  	if c.heapMarked > trigger {
  1528  		trigger = c.heapMarked
  1529  	}
  1530  	c.maxStackScan.Store(stackSize)
  1531  	c.globalsScan.Store(globalsSize)
  1532  	c.heapLive.Store(trigger)
  1533  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1534  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1535  }
  1536  
  1537  func (c *GCController) AssistWorkPerByte() float64 {
  1538  	return c.assistWorkPerByte.Load()
  1539  }
  1540  
  1541  func (c *GCController) HeapGoal() uint64 {
  1542  	return c.heapGoal()
  1543  }
  1544  
  1545  func (c *GCController) HeapLive() uint64 {
  1546  	return c.heapLive.Load()
  1547  }
  1548  
  1549  func (c *GCController) HeapMarked() uint64 {
  1550  	return c.heapMarked
  1551  }
  1552  
  1553  func (c *GCController) Triggered() uint64 {
  1554  	return c.triggered
  1555  }
  1556  
  1557  type GCControllerReviseDelta struct {
  1558  	HeapLive        int64
  1559  	HeapScan        int64
  1560  	HeapScanWork    int64
  1561  	StackScanWork   int64
  1562  	GlobalsScanWork int64
  1563  }
  1564  
  1565  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1566  	c.heapLive.Add(d.HeapLive)
  1567  	c.heapScan.Add(d.HeapScan)
  1568  	c.heapScanWork.Add(d.HeapScanWork)
  1569  	c.stackScanWork.Add(d.StackScanWork)
  1570  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1571  	c.revise()
  1572  }
  1573  
  1574  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1575  	c.assistTime.Store(assistTime)
  1576  	c.endCycle(elapsed, gomaxprocs, false)
  1577  	c.resetLive(bytesMarked)
  1578  	c.commit(false)
  1579  }
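
        // A minimal sketch of a pacer round trip with made-up numbers, roughly the
        // shape of how a test might drive the test-only controller:
        //
        //	c := runtime.NewGCController(100, math.MaxInt64)
        //	c.StartCycle(1<<20, 1<<10, 0.5, 4)
        //	c.Revise(runtime.GCControllerReviseDelta{HeapLive: 1 << 20})
        //	c.EndCycle(2<<20, 0, 1e6, 4)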
  1580  
  1581  func (c *GCController) AddIdleMarkWorker() bool {
  1582  	return c.addIdleMarkWorker()
  1583  }
  1584  
  1585  func (c *GCController) NeedIdleMarkWorker() bool {
  1586  	return c.needIdleMarkWorker()
  1587  }
  1588  
  1589  func (c *GCController) RemoveIdleMarkWorker() {
  1590  	c.removeIdleMarkWorker()
  1591  }
  1592  
  1593  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1594  	c.setMaxIdleMarkWorkers(max)
  1595  }
  1596  
  1597  var alwaysFalse bool
  1598  var escapeSink any
  1599  
  1600  func Escape[T any](x T) T {
  1601  	if alwaysFalse {
  1602  		escapeSink = x
  1603  	}
  1604  	return x
  1605  }
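
        // Escape is typically used in tests and benchmarks to defeat escape analysis,
        // e.g. to force a heap allocation or keep a benchmark result live (sketch):
        //
        //	buf := runtime.Escape(make([]byte, 1024)) // compiler must assume buf escapes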
  1606  
  1607  // Acquirem blocks preemption.
  1608  func Acquirem() {
  1609  	acquirem()
  1610  }
  1611  
  1612  func Releasem() {
  1613  	releasem(getg().m)
  1614  }
  1615  
  1616  var Timediv = timediv
  1617  
  1618  type PIController struct {
  1619  	piController
  1620  }
  1621  
  1622  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1623  	return &PIController{piController{
  1624  		kp:  kp,
  1625  		ti:  ti,
  1626  		tt:  tt,
  1627  		min: min,
  1628  		max: max,
  1629  	}}
  1630  }
  1631  
  1632  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1633  	return c.piController.next(input, setpoint, period)
  1634  }
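
        // A minimal usage sketch (gains are made up): step the controller once.
        //
        //	c := runtime.NewPIController(0.5, 1.0, 1.0, 0, 1)
        //	out, ok := c.Next(0.3, 0.5, 1.0) // input, setpoint, period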
  1635  
  1636  const (
  1637  	CapacityPerProc          = capacityPerProc
  1638  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1639  )
  1640  
  1641  type GCCPULimiter struct {
  1642  	limiter gcCPULimiterState
  1643  }
  1644  
  1645  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1646  	// Force the controller to escape. We're going to
  1647  	// do 64-bit atomics on it, and if it gets stack-allocated
  1648  	// on a 32-bit architecture, it may get allocated unaligned
  1649  	// space.
  1650  	l := Escape(new(GCCPULimiter))
  1651  	l.limiter.test = true
  1652  	l.limiter.resetCapacity(now, gomaxprocs)
  1653  	return l
  1654  }
  1655  
  1656  func (l *GCCPULimiter) Fill() uint64 {
  1657  	return l.limiter.bucket.fill
  1658  }
  1659  
  1660  func (l *GCCPULimiter) Capacity() uint64 {
  1661  	return l.limiter.bucket.capacity
  1662  }
  1663  
  1664  func (l *GCCPULimiter) Overflow() uint64 {
  1665  	return l.limiter.overflow
  1666  }
  1667  
  1668  func (l *GCCPULimiter) Limiting() bool {
  1669  	return l.limiter.limiting()
  1670  }
  1671  
  1672  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1673  	return l.limiter.needUpdate(now)
  1674  }
  1675  
  1676  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1677  	l.limiter.startGCTransition(enableGC, now)
  1678  }
  1679  
  1680  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1681  	l.limiter.finishGCTransition(now)
  1682  }
  1683  
  1684  func (l *GCCPULimiter) Update(now int64) {
  1685  	l.limiter.update(now)
  1686  }
  1687  
  1688  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1689  	l.limiter.addAssistTime(t)
  1690  }
  1691  
  1692  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1693  	l.limiter.resetCapacity(now, nprocs)
  1694  }
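
        // A minimal sketch of exercising the CPU limiter through the wrappers above.
        // The timestamps are illustrative nanosecond values, not real clock readings.
        func exampleGCCPULimiter() {
        	l := NewGCCPULimiter(0, 4)      // created at t=0 with 4 Ps
        	l.StartGCTransition(true, 0)    // a GC cycle begins turning on at t=0
        	l.FinishGCTransition(1_000_000) // ... and the transition ends at t=1ms
        	l.AddAssistTime(500_000)        // record 0.5ms of assist work
        	l.Update(2_000_000)             // flush accumulated CPU time at t=2ms
        	_ = l.Limiting()                // whether the limiter is currently engaged
        	_, _ = l.Fill(), l.Capacity()   // current bucket state
        }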
  1695  
  1696  const ScavengePercent = scavengePercent
  1697  
  1698  type Scavenger struct {
  1699  	Sleep      func(int64) int64
  1700  	Scavenge   func(uintptr) (uintptr, int64)
  1701  	ShouldStop func() bool
  1702  	GoMaxProcs func() int32
  1703  
  1704  	released  atomic.Uintptr
  1705  	scavenger scavengerState
  1706  	stop      chan<- struct{}
  1707  	done      <-chan struct{}
  1708  }
  1709  
  1710  func (s *Scavenger) Start() {
  1711  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1712  		panic("must populate all stubs")
  1713  	}
  1714  
  1715  	// Install hooks.
  1716  	s.scavenger.sleepStub = s.Sleep
  1717  	s.scavenger.scavenge = s.Scavenge
  1718  	s.scavenger.shouldStop = s.ShouldStop
  1719  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1720  
  1721  	// Start up the scavenger goroutine and wait for it to be ready.
  1722  	stop := make(chan struct{})
  1723  	s.stop = stop
  1724  	done := make(chan struct{})
  1725  	s.done = done
  1726  	go func() {
  1727  		// This should match bgscavenge, loosely.
  1728  		s.scavenger.init()
  1729  		s.scavenger.park()
  1730  		for {
  1731  			select {
  1732  			case <-stop:
  1733  				close(done)
  1734  				return
  1735  			default:
  1736  			}
  1737  			released, workTime := s.scavenger.run()
  1738  			if released == 0 {
  1739  				s.scavenger.park()
  1740  				continue
  1741  			}
  1742  			s.released.Add(released)
  1743  			s.scavenger.sleep(workTime)
  1744  		}
  1745  	}()
  1746  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1747  		panic("timed out waiting for scavenger to get ready")
  1748  	}
  1749  }
  1750  
  1751  // BlockUntilParked blocks until the scavenger parks, or until
  1752  // timeout is exceeded. Returns true if the scavenger parked.
  1753  //
  1754  // Note that in testing, parked means something slightly different.
  1755  // In real use, the scavenger parks to sleep, too, but in testing,
  1756  // it only parks when it actually has no work to do.
  1757  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1758  	// Just spin, waiting for it to park.
  1759  	//
  1760  	// The actual parking process is racy with respect to
  1761  	// wakeups, which is fine, but for testing we need something
  1762  	// a bit more robust.
  1763  	start := nanotime()
  1764  	for nanotime()-start < timeout {
  1765  		lock(&s.scavenger.lock)
  1766  		parked := s.scavenger.parked
  1767  		unlock(&s.scavenger.lock)
  1768  		if parked {
  1769  			return true
  1770  		}
  1771  		Gosched()
  1772  	}
  1773  	return false
  1774  }
  1775  
  1776  // Released returns how many bytes the scavenger released.
  1777  func (s *Scavenger) Released() uintptr {
  1778  	return s.released.Load()
  1779  }
  1780  
  1781  // Wake wakes up a parked scavenger to keep running.
  1782  func (s *Scavenger) Wake() {
  1783  	s.scavenger.wake()
  1784  }
  1785  
  1786  // Stop cleans up the scavenger's resources. The scavenger
  1787  // must be parked for this to work.
  1788  func (s *Scavenger) Stop() {
  1789  	lock(&s.scavenger.lock)
  1790  	parked := s.scavenger.parked
  1791  	unlock(&s.scavenger.lock)
  1792  	if !parked {
  1793  		panic("tried to clean up scavenger that is not parked")
  1794  	}
  1795  	close(s.stop)
  1796  	s.Wake()
  1797  	<-s.done
  1798  }
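
        // A minimal sketch of wiring up the test Scavenger above. The stubs do no
        // real work (they report nothing to scavenge), so the goroutine parks again
        // right after being woken; all names and values are illustrative.
        func exampleScavenger() {
        	var s Scavenger
        	s.Sleep = func(ns int64) int64 { return ns }                  // pretend to sleep exactly as asked
        	s.Scavenge = func(_ uintptr) (uintptr, int64) { return 0, 0 } // nothing to release
        	s.ShouldStop = func() bool { return true }
        	s.GoMaxProcs = func() int32 { return 1 }
        	s.Start()
        	s.Wake() // run one iteration; with nothing released it parks again
        	if s.BlockUntilParked(1e9 /* 1 second */) {
        		s.Stop()
        	}
        	_ = s.Released() // total bytes released, 0 in this sketch
        }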
  1799  
  1800  type ScavengeIndex struct {
  1801  	i scavengeIndex
  1802  }
  1803  
  1804  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1805  	s := new(ScavengeIndex)
  1806  	// This is a bit lazy but we easily guarantee we'll be able
  1807  	// to reference all the relevant chunks. The worst-case
  1808  	// memory usage here is 512 MiB, but tests generally use
  1809  	// small offsets from BaseChunkIdx, which results in ~100s
  1810  	// of KiB in memory use.
  1811  	//
  1812  	// This may still be worth making better, at least by sharing
  1813  	// this fairly large array across calls with a sync.Pool or
  1814  	// something. Currently, when the tests are run serially,
  1815  	// it takes around 0.5s. Not all that much, but if we have
  1816  	// a lot of tests like this it could add up.
  1817  	s.i.chunks = make([]atomicScavChunkData, max)
  1818  	s.i.min.Store(uintptr(min))
  1819  	s.i.max.Store(uintptr(max))
  1820  	s.i.minHeapIdx.Store(uintptr(min))
  1821  	s.i.test = true
  1822  	return s
  1823  }
  1824  
  1825  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1826  	ci, off := s.i.find(force)
  1827  	return ChunkIdx(ci), off
  1828  }
  1829  
  1830  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1831  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1832  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1833  
  1834  	if sc == ec {
  1835  		// The range doesn't cross any chunk boundaries.
  1836  		s.i.alloc(sc, ei+1-si)
  1837  	} else {
  1838  		// The range crosses at least one chunk boundary.
  1839  		s.i.alloc(sc, pallocChunkPages-si)
  1840  		for c := sc + 1; c < ec; c++ {
  1841  			s.i.alloc(c, pallocChunkPages)
  1842  		}
  1843  		s.i.alloc(ec, ei+1)
  1844  	}
  1845  }
  1846  
  1847  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1848  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1849  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1850  
  1851  	if sc == ec {
  1852  		// The range doesn't cross any chunk boundaries.
  1853  		s.i.free(sc, si, ei+1-si)
  1854  	} else {
  1855  		// The range crosses at least one chunk boundary.
  1856  		s.i.free(sc, si, pallocChunkPages-si)
  1857  		for c := sc + 1; c < ec; c++ {
  1858  			s.i.free(c, 0, pallocChunkPages)
  1859  		}
  1860  		s.i.free(ec, 0, ei+1)
  1861  	}
  1862  }
  1863  
  1864  func (s *ScavengeIndex) ResetSearchAddrs() {
  1865  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1866  		addr, marked := a.Load()
  1867  		if marked {
  1868  			a.StoreUnmark(addr, addr)
  1869  		}
  1870  		a.Clear()
  1871  	}
  1872  	s.i.freeHWM = minOffAddr
  1873  }
  1874  
  1875  func (s *ScavengeIndex) NextGen() {
  1876  	s.i.nextGen()
  1877  }
  1878  
  1879  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1880  	s.i.setEmpty(chunkIdx(ci))
  1881  }
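
        // A minimal sketch of exercising the scavenge index. The caller is assumed
        // to supply a chunk range and a page-aligned address range within it (the
        // real tests build these from BaseChunkIdx); everything here is illustrative.
        func exampleScavengeIndex(min, max ChunkIdx, base, limit uintptr) {
        	s := NewScavengeIndex(min, max)
        	s.AllocRange(base, limit) // pretend the pages were allocated ...
        	s.FreeRange(base, limit)  // ... and then freed, so they can be scavenged
        	// Force the search to consider pages freed in the current generation;
        	// a zero chunk index means nothing was found.
        	if ci, _ := s.Find(true); ci != 0 {
        		s.SetEmpty(ci) // mark the chunk as having nothing left to scavenge
        	}
        }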
  1882  
  1883  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1884  	sc0 := scavChunkData{
  1885  		gen:            gen,
  1886  		inUse:          inUse,
  1887  		lastInUse:      lastInUse,
  1888  		scavChunkFlags: scavChunkFlags(flags),
  1889  	}
  1890  	scp := sc0.pack()
  1891  	sc1 := unpackScavChunkData(scp)
  1892  	return sc0 == sc1
  1893  }
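
        // A minimal sketch of the round-trip check above with arbitrary field values:
        // generation 1, 12 pages in use now, 34 in use last generation, no flags.
        func examplePackScavChunkData() bool {
        	return CheckPackScavChunkData(1, 12, 34, 0)
        }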
  1894  
  1895  const GTrackingPeriod = gTrackingPeriod
  1896  
  1897  var ZeroBase = unsafe.Pointer(&zerobase)
  1898  
  1899  const UserArenaChunkBytes = userArenaChunkBytes
  1900  
  1901  type UserArena struct {
  1902  	arena *userArena
  1903  }
  1904  
  1905  func NewUserArena() *UserArena {
  1906  	return &UserArena{newUserArena()}
  1907  }
  1908  
  1909  func (a *UserArena) New(out *any) {
  1910  	i := efaceOf(out)
  1911  	typ := i._type
  1912  	if typ.Kind_&kindMask != kindPtr {
  1913  		panic("new result of non-ptr type")
  1914  	}
  1915  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1916  	i.data = a.arena.new(typ)
  1917  }
  1918  
  1919  func (a *UserArena) Slice(sl any, cap int) {
  1920  	a.arena.slice(sl, cap)
  1921  }
  1922  
  1923  func (a *UserArena) Free() {
  1924  	a.arena.free()
  1925  }
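
        // A minimal sketch of allocating from the test UserArena wrapper above; the
        // element types and sizes are illustrative. Arena memory must not be used
        // after Free.
        func exampleUserArena() {
        	a := NewUserArena()

        	var x any = (*[64]byte)(nil) // New needs a pointer type to know what to allocate
        	a.New(&x)
        	buf := x.(*[64]byte) // now points at a [64]byte inside the arena
        	buf[0] = 1

        	var ints []int
        	a.Slice(&ints, 16) // the slice's backing array is allocated in the arena
        	for i := range ints {
        		ints[i] = i
        	}

        	a.Free() // buf and ints must not be used after this
        }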
  1926  
  1927  func GlobalWaitingArenaChunks() int {
  1928  	n := 0
  1929  	systemstack(func() {
  1930  		lock(&mheap_.lock)
  1931  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1932  			n++
  1933  		}
  1934  		unlock(&mheap_.lock)
  1935  	})
  1936  	return n
  1937  }
  1938  
  1939  func UserArenaClone[T any](s T) T {
  1940  	return arena_heapify(s).(T)
  1941  }
  1942  
  1943  var AlignUp = alignUp
  1944  
  1945  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1946  	return blockUntilEmptyFinalizerQueue(timeout)
  1947  }
  1948  
  1949  func FrameStartLine(f *Frame) int {
  1950  	return f.startLine
  1951  }
  1952  
  1953  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1954  // This memory will never be freed; use sparingly.
  1955  func PersistentAlloc(n uintptr) unsafe.Pointer {
  1956  	return persistentalloc(n, 0, &memstats.other_sys)
  1957  }
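
        // A minimal sketch: carve a small, never-freed off-heap buffer out of a
        // persistent allocation. The size is illustrative.
        func examplePersistentAlloc() []byte {
        	p := PersistentAlloc(64)
        	return unsafe.Slice((*byte)(p), 64)
        }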
  1958  
  1959  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1960  // pcBuf with the return addresses of the physical frames on the stack.
  1961  func FPCallers(pcBuf []uintptr) int {
  1962  	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
  1963  }
  1964  
  1965  const FramePointerEnabled = framepointer_enabled
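
        // A minimal sketch of collecting caller PCs via frame pointer unwinding; only
        // meaningful where frame pointers are enabled. The buffer size is illustrative.
        func exampleFPCallers() []uintptr {
        	if !FramePointerEnabled {
        		return nil
        	}
        	pcs := make([]uintptr, 32)
        	return pcs[:FPCallers(pcs)]
        }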
  1966  
  1967  var (
  1968  	IsPinned      = isPinned
  1969  	GetPinCounter = pinnerGetPinCounter
  1970  )
  1971  
  1972  func SetPinnerLeakPanic(f func()) {
  1973  	pinnerLeakPanic = f
  1974  }
  1975  func GetPinnerLeakPanic() func() {
  1976  	return pinnerLeakPanic
  1977  }
  1978  
  1979  var testUintptr uintptr
  1980  
  1981  func MyGenericFunc[T any]() {
  1982  	systemstack(func() {
  1983  		testUintptr = 4
  1984  	})
  1985  }
  1986  
  1987  func UnsafePoint(pc uintptr) bool {
  1988  	fi := findfunc(pc)
  1989  	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
  1990  	switch v {
  1991  	case abi.UnsafePointUnsafe:
  1992  		return true
  1993  	case abi.UnsafePointSafe:
  1994  		return false
  1995  	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
  1996  		// These are all interruptible; they just encode a nonstandard
  1997  		// way of recovering when interrupted.
  1998  		return false
  1999  	default:
  2000  		var buf [20]byte
  2001  		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
  2002  	}
  2003  }