github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  var Fadd64 = fadd64
    18  var Fsub64 = fsub64
    19  var Fmul64 = fmul64
    20  var Fdiv64 = fdiv64
    21  var F64to32 = f64to32
    22  var F32to64 = f32to64
    23  var Fcmp64 = fcmp64
    24  var Fintto64 = fintto64
    25  var F64toint = f64toint
    26  
    27  var Entersyscall = entersyscall
    28  var Exitsyscall = exitsyscall
    29  var LockedOSThread = lockedOSThread
    30  var Xadduintptr = atomic.Xadduintptr
    31  
    32  var Fastlog2 = fastlog2
    33  
    34  var Atoi = atoi
    35  var Atoi32 = atoi32
    36  var ParseByteCount = parseByteCount
    37  
    38  var Nanotime = nanotime
    39  var NetpollBreak = netpollBreak
    40  var Usleep = usleep
    41  
    42  var PhysPageSize = physPageSize
    43  var PhysHugePageSize = physHugePageSize
    44  
    45  var NetpollGenericInit = netpollGenericInit
    46  
    47  var Memmove = memmove
    48  var MemclrNoHeapPointers = memclrNoHeapPointers
    49  
    50  const TracebackInnerFrames = tracebackInnerFrames
    51  const TracebackOuterFrames = tracebackOuterFrames
    52  
    53  var LockPartialOrder = lockPartialOrder
    54  
    55  type LockRank lockRank
    56  
    57  func (l LockRank) String() string {
    58  	return lockRank(l).String()
    59  }
    60  
    61  const PreemptMSupported = preemptMSupported
    62  
    63  type LFNode struct {
    64  	Next    uint64
    65  	Pushcnt uintptr
    66  }
    67  
    68  func LFStackPush(head *uint64, node *LFNode) {
    69  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    70  }
    71  
    72  func LFStackPop(head *uint64) *LFNode {
    73  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
    74  }
    75  func LFNodeValidate(node *LFNode) {
    76  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    77  }
    78  
    79  func Netpoll(delta int64) {
    80  	systemstack(func() {
    81  		netpoll(delta)
    82  	})
    83  }
    84  
    85  func GCMask(x any) (ret []byte) {
    86  	systemstack(func() {
    87  		ret = getgcmask(x)
    88  	})
    89  	return
    90  }
    91  
    92  func RunSchedLocalQueueTest() {
    93  	pp := new(p)
    94  	gs := make([]g, len(pp.runq))
    95  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
    96  	for i := 0; i < len(pp.runq); i++ {
    97  		if g, _ := runqget(pp); g != nil {
    98  			throw("runq is not empty initially")
    99  		}
   100  		for j := 0; j < i; j++ {
   101  			runqput(pp, &gs[i], false)
   102  		}
   103  		for j := 0; j < i; j++ {
   104  			if g, _ := runqget(pp); g != &gs[i] {
   105  				print("bad element at iter ", i, "/", j, "\n")
   106  				throw("bad element")
   107  			}
   108  		}
   109  		if g, _ := runqget(pp); g != nil {
   110  			throw("runq is not empty afterwards")
   111  		}
   112  	}
   113  }
   114  
   115  func RunSchedLocalQueueStealTest() {
   116  	p1 := new(p)
   117  	p2 := new(p)
   118  	gs := make([]g, len(p1.runq))
   119  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   120  	for i := 0; i < len(p1.runq); i++ {
   121  		for j := 0; j < i; j++ {
   122  			gs[j].sig = 0
   123  			runqput(p1, &gs[j], false)
   124  		}
   125  		gp := runqsteal(p2, p1, true)
   126  		s := 0
   127  		if gp != nil {
   128  			s++
   129  			gp.sig++
   130  		}
   131  		for {
   132  			gp, _ = runqget(p2)
   133  			if gp == nil {
   134  				break
   135  			}
   136  			s++
   137  			gp.sig++
   138  		}
   139  		for {
   140  			gp, _ = runqget(p1)
   141  			if gp == nil {
   142  				break
   143  			}
   144  			gp.sig++
   145  		}
   146  		for j := 0; j < i; j++ {
   147  			if gs[j].sig != 1 {
   148  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   149  				throw("bad element")
   150  			}
   151  		}
   152  		if s != i/2 && s != i/2+1 {
   153  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   154  			throw("bad steal")
   155  		}
   156  	}
   157  }
   158  
   159  func RunSchedLocalQueueEmptyTest(iters int) {
   160  	// Test that runq is not spuriously reported as empty.
   161  	// Runq emptiness affects scheduling decisions and spurious emptiness
   162  	// can lead to underutilization (both runnable Gs and idle Ps coexist
    163  	// for an arbitrarily long time).
   164  	done := make(chan bool, 1)
   165  	p := new(p)
   166  	gs := make([]g, 2)
   167  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   168  	ready := new(uint32)
   169  	for i := 0; i < iters; i++ {
   170  		*ready = 0
   171  		next0 := (i & 1) == 0
   172  		next1 := (i & 2) == 0
   173  		runqput(p, &gs[0], next0)
   174  		go func() {
   175  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   176  			}
   177  			if runqempty(p) {
   178  				println("next:", next0, next1)
   179  				throw("queue is empty")
   180  			}
   181  			done <- true
   182  		}()
   183  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   184  		}
   185  		runqput(p, &gs[1], next1)
   186  		runqget(p)
   187  		<-done
   188  		runqget(p)
   189  	}
   190  }
   191  
   192  var (
   193  	StringHash = stringHash
   194  	BytesHash  = bytesHash
   195  	Int32Hash  = int32Hash
   196  	Int64Hash  = int64Hash
   197  	MemHash    = memhash
   198  	MemHash32  = memhash32
   199  	MemHash64  = memhash64
   200  	EfaceHash  = efaceHash
   201  	IfaceHash  = ifaceHash
   202  )
   203  
   204  var UseAeshash = &useAeshash
   205  
   206  func MemclrBytes(b []byte) {
   207  	s := (*slice)(unsafe.Pointer(&b))
   208  	memclrNoHeapPointers(s.array, uintptr(s.len))
   209  }
   210  
   211  const HashLoad = hashLoad
   212  
   213  // entry point for testing
   214  func GostringW(w []uint16) (s string) {
   215  	systemstack(func() {
   216  		s = gostringw(&w[0])
   217  	})
   218  	return
   219  }
   220  
   221  var Open = open
   222  var Close = closefd
   223  var Read = read
   224  var Write = write
   225  
   226  func Envs() []string     { return envs }
   227  func SetEnvs(e []string) { envs = e }
   228  
   229  // For benchmarking.
   230  
   231  func BenchSetType(n int, x any) {
   232  	// Escape x to ensure it is allocated on the heap, as we are
   233  	// working on the heap bits here.
   234  	Escape(x)
   235  	e := *efaceOf(&x)
   236  	t := e._type
   237  	var size uintptr
   238  	var p unsafe.Pointer
   239  	switch t.Kind_ & kindMask {
   240  	case kindPtr:
   241  		t = (*ptrtype)(unsafe.Pointer(t)).Elem
   242  		size = t.Size_
   243  		p = e.data
   244  	case kindSlice:
   245  		slice := *(*struct {
   246  			ptr      unsafe.Pointer
   247  			len, cap uintptr
   248  		})(e.data)
   249  		t = (*slicetype)(unsafe.Pointer(t)).Elem
   250  		size = t.Size_ * slice.len
   251  		p = slice.ptr
   252  	}
   253  	allocSize := roundupsize(size)
   254  	systemstack(func() {
   255  		for i := 0; i < n; i++ {
   256  			heapBitsSetType(uintptr(p), allocSize, size, t)
   257  		}
   258  	})
   259  }
   260  
   261  const PtrSize = goarch.PtrSize
   262  
   263  var ForceGCPeriod = &forcegcperiod
   264  
   265  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   266  // the "environment" traceback level, so later calls to
   267  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   268  func SetTracebackEnv(level string) {
   269  	setTraceback(level)
   270  	traceback_env = traceback_cache
   271  }
   272  
   273  var ReadUnaligned32 = readUnaligned32
   274  var ReadUnaligned64 = readUnaligned64
   275  
   276  func CountPagesInUse() (pagesInUse, counted uintptr) {
   277  	stopTheWorld("CountPagesInUse")
   278  
   279  	pagesInUse = uintptr(mheap_.pagesInUse.Load())
   280  
   281  	for _, s := range mheap_.allspans {
   282  		if s.state.get() == mSpanInUse {
   283  			counted += s.npages
   284  		}
   285  	}
   286  
   287  	startTheWorld()
   288  
   289  	return
   290  }
   291  
   292  func Fastrand() uint32          { return fastrand() }
   293  func Fastrand64() uint64        { return fastrand64() }
   294  func Fastrandn(n uint32) uint32 { return fastrandn(n) }
   295  
   296  type ProfBuf profBuf
   297  
   298  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   299  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   300  }
   301  
   302  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   303  	(*profBuf)(p).write(tag, now, hdr, stk)
   304  }
   305  
   306  const (
   307  	ProfBufBlocking    = profBufBlocking
   308  	ProfBufNonBlocking = profBufNonBlocking
   309  )
   310  
   311  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   312  	return (*profBuf)(p).read(profBufReadMode(mode))
   313  }
   314  
   315  func (p *ProfBuf) Close() {
   316  	(*profBuf)(p).close()
   317  }
   318  
   319  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   320  	stopTheWorld("ReadMetricsSlow")
   321  
   322  	// Initialize the metrics beforehand because this could
   323  	// allocate and skew the stats.
   324  	metricsLock()
   325  	initMetrics()
   326  	metricsUnlock()
   327  
   328  	systemstack(func() {
   329  		// Read memstats first. It's going to flush
   330  		// the mcaches which readMetrics does not do, so
   331  		// going the other way around may result in
   332  		// inconsistent statistics.
   333  		readmemstats_m(memStats)
   334  	})
   335  
   336  	// Read metrics off the system stack.
   337  	//
   338  	// The only part of readMetrics that could allocate
   339  	// and skew the stats is initMetrics.
   340  	readMetrics(samplesp, len, cap)
   341  
   342  	startTheWorld()
   343  }
   344  
   345  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   346  // MemStats accumulated by scanning the heap.
   347  func ReadMemStatsSlow() (base, slow MemStats) {
   348  	stopTheWorld("ReadMemStatsSlow")
   349  
   350  	// Run on the system stack to avoid stack growth allocation.
   351  	systemstack(func() {
   352  		// Make sure stats don't change.
   353  		getg().m.mallocing++
   354  
   355  		readmemstats_m(&base)
   356  
   357  		// Initialize slow from base and zero the fields we're
   358  		// recomputing.
   359  		slow = base
   360  		slow.Alloc = 0
   361  		slow.TotalAlloc = 0
   362  		slow.Mallocs = 0
   363  		slow.Frees = 0
   364  		slow.HeapReleased = 0
   365  		var bySize [_NumSizeClasses]struct {
   366  			Mallocs, Frees uint64
   367  		}
   368  
   369  		// Add up current allocations in spans.
   370  		for _, s := range mheap_.allspans {
   371  			if s.state.get() != mSpanInUse {
   372  				continue
   373  			}
   374  			if s.isUnusedUserArenaChunk() {
   375  				continue
   376  			}
   377  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   378  				slow.Mallocs++
   379  				slow.Alloc += uint64(s.elemsize)
   380  			} else {
   381  				slow.Mallocs += uint64(s.allocCount)
   382  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   383  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   384  			}
   385  		}
   386  
   387  		// Add in frees by just reading the stats for those directly.
   388  		var m heapStatsDelta
   389  		memstats.heapStats.unsafeRead(&m)
   390  
   391  		// Collect per-sizeclass free stats.
   392  		var smallFree uint64
   393  		for i := 0; i < _NumSizeClasses; i++ {
   394  			slow.Frees += uint64(m.smallFreeCount[i])
   395  			bySize[i].Frees += uint64(m.smallFreeCount[i])
   396  			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
   397  			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
   398  		}
   399  		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
   400  		slow.Mallocs += slow.Frees
   401  
   402  		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
   403  
   404  		for i := range slow.BySize {
   405  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   406  			slow.BySize[i].Frees = bySize[i].Frees
   407  		}
   408  
   409  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   410  			chunk := mheap_.pages.tryChunkOf(i)
   411  			if chunk == nil {
   412  				continue
   413  			}
   414  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   415  			slow.HeapReleased += uint64(pg) * pageSize
   416  		}
   417  		for _, p := range allp {
   418  			pg := sys.OnesCount64(p.pcache.scav)
   419  			slow.HeapReleased += uint64(pg) * pageSize
   420  		}
   421  
   422  		getg().m.mallocing--
   423  	})
   424  
   425  	startTheWorld()
   426  	return
   427  }
   428  
   429  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   430  // and verifies that unwinding the new stack doesn't crash, even if the old
   431  // stack has been freed or reused (simulated via poisoning).
   432  func ShrinkStackAndVerifyFramePointers() {
   433  	before := stackPoisonCopy
   434  	defer func() { stackPoisonCopy = before }()
   435  	stackPoisonCopy = 1
   436  
   437  	gp := getg()
   438  	systemstack(func() {
   439  		shrinkstack(gp)
   440  	})
   441  	// If our new stack contains frame pointers into the old stack, this will
   442  	// crash because the old stack has been poisoned.
   443  	FPCallers(make([]uintptr, 1024))
   444  }
   445  
   446  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   447  // stderr, and blocks in a stack containing
   448  // "runtime.blockOnSystemStackInternal".
   449  func BlockOnSystemStack() {
   450  	systemstack(blockOnSystemStackInternal)
   451  }
   452  
   453  func blockOnSystemStackInternal() {
   454  	print("x\n")
   455  	lock(&deadlock)
   456  	lock(&deadlock)
   457  }
   458  
   459  type RWMutex struct {
   460  	rw rwmutex
   461  }
   462  
   463  func (rw *RWMutex) RLock() {
   464  	rw.rw.rlock()
   465  }
   466  
   467  func (rw *RWMutex) RUnlock() {
   468  	rw.rw.runlock()
   469  }
   470  
   471  func (rw *RWMutex) Lock() {
   472  	rw.rw.lock()
   473  }
   474  
   475  func (rw *RWMutex) Unlock() {
   476  	rw.rw.unlock()
   477  }
   478  
   479  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   480  
   481  func MapBucketsCount(m map[int]int) int {
   482  	h := *(**hmap)(unsafe.Pointer(&m))
   483  	return 1 << h.B
   484  }
   485  
   486  func MapBucketsPointerIsNil(m map[int]int) bool {
   487  	h := *(**hmap)(unsafe.Pointer(&m))
   488  	return h.buckets == nil
   489  }
   490  
   491  func LockOSCounts() (external, internal uint32) {
   492  	gp := getg()
   493  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   494  		if gp.lockedm != 0 {
   495  			panic("lockedm on non-locked goroutine")
   496  		}
   497  	} else {
   498  		if gp.lockedm == 0 {
   499  			panic("nil lockedm on locked goroutine")
   500  		}
   501  	}
   502  	return gp.m.lockedExt, gp.m.lockedInt
   503  }
   504  
   505  //go:noinline
   506  func TracebackSystemstack(stk []uintptr, i int) int {
   507  	if i == 0 {
   508  		pc, sp := getcallerpc(), getcallersp()
   509  		var u unwinder
   510  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   511  		return tracebackPCs(&u, 0, stk)
   512  	}
   513  	n := 0
   514  	systemstack(func() {
   515  		n = TracebackSystemstack(stk, i-1)
   516  	})
   517  	return n
   518  }
   519  
   520  func KeepNArenaHints(n int) {
   521  	hint := mheap_.arenaHints
   522  	for i := 1; i < n; i++ {
   523  		hint = hint.next
   524  		if hint == nil {
   525  			return
   526  		}
   527  	}
   528  	hint.next = nil
   529  }
   530  
   531  // MapNextArenaHint reserves a page at the next arena growth hint,
   532  // preventing the arena from growing there, and returns the range of
   533  // addresses that are no longer viable.
   534  //
   535  // This may fail to reserve memory. If it fails, it still returns the
   536  // address range it attempted to reserve.
   537  func MapNextArenaHint() (start, end uintptr, ok bool) {
   538  	hint := mheap_.arenaHints
   539  	addr := hint.addr
   540  	if hint.down {
   541  		start, end = addr-heapArenaBytes, addr
   542  		addr -= physPageSize
   543  	} else {
   544  		start, end = addr, addr+heapArenaBytes
   545  	}
   546  	got := sysReserve(unsafe.Pointer(addr), physPageSize)
   547  	ok = (addr == uintptr(got))
   548  	if !ok {
   549  		// We were unable to get the requested reservation.
   550  		// Release what we did get and fail.
   551  		sysFreeOS(got, physPageSize)
   552  	}
   553  	return
   554  }
   555  
   556  func GetNextArenaHint() uintptr {
   557  	return mheap_.arenaHints.addr
   558  }
   559  
   560  type G = g
   561  
   562  type Sudog = sudog
   563  
   564  func Getg() *G {
   565  	return getg()
   566  }
   567  
   568  func Goid() uint64 {
   569  	return getg().goid
   570  }
   571  
   572  func GIsWaitingOnMutex(gp *G) bool {
   573  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   574  }
   575  
   576  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   577  
   578  //go:noinline
   579  func PanicForTesting(b []byte, i int) byte {
   580  	return unexportedPanicForTesting(b, i)
   581  }
   582  
   583  //go:noinline
   584  func unexportedPanicForTesting(b []byte, i int) byte {
   585  	return b[i]
   586  }
   587  
   588  func G0StackOverflow() {
   589  	systemstack(func() {
   590  		stackOverflow(nil)
   591  	})
   592  }
   593  
   594  func stackOverflow(x *byte) {
   595  	var buf [256]byte
   596  	stackOverflow(&buf[0])
   597  }
   598  
   599  func MapTombstoneCheck(m map[int]int) {
   600  	// Make sure emptyOne and emptyRest are distributed correctly.
   601  	// We should have a series of filled and emptyOne cells, followed by
   602  	// a series of emptyRest cells.
   603  	h := *(**hmap)(unsafe.Pointer(&m))
   604  	i := any(m)
   605  	t := *(**maptype)(unsafe.Pointer(&i))
   606  
   607  	for x := 0; x < 1<<h.B; x++ {
   608  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
   609  		n := 0
   610  		for b := b0; b != nil; b = b.overflow(t) {
   611  			for i := 0; i < bucketCnt; i++ {
   612  				if b.tophash[i] != emptyRest {
   613  					n++
   614  				}
   615  			}
   616  		}
   617  		k := 0
   618  		for b := b0; b != nil; b = b.overflow(t) {
   619  			for i := 0; i < bucketCnt; i++ {
   620  				if k < n && b.tophash[i] == emptyRest {
   621  					panic("early emptyRest")
   622  				}
   623  				if k >= n && b.tophash[i] != emptyRest {
   624  					panic("late non-emptyRest")
   625  				}
   626  				if k == n-1 && b.tophash[i] == emptyOne {
   627  					panic("last non-emptyRest entry is emptyOne")
   628  				}
   629  				k++
   630  			}
   631  		}
   632  	}
   633  }
   634  
   635  func RunGetgThreadSwitchTest() {
   636  	// Test that getg works correctly with thread switch.
   637  	// With gccgo, if we generate getg inlined, the backend
   638  	// may cache the address of the TLS variable, which
   639  	// will become invalid after a thread switch. This test
   640  	// checks that the bad caching doesn't happen.
   641  
   642  	ch := make(chan int)
   643  	go func(ch chan int) {
   644  		ch <- 5
   645  		LockOSThread()
   646  	}(ch)
   647  
   648  	g1 := getg()
   649  
   650  	// Block on a receive. This is likely to get us a thread
   651  	// switch. If we yield to the sender goroutine, it will
   652  	// lock the thread, forcing us to resume on a different
   653  	// thread.
   654  	<-ch
   655  
   656  	g2 := getg()
   657  	if g1 != g2 {
   658  		panic("g1 != g2")
   659  	}
   660  
   661  	// Also test getg after some control flow, as the
   662  	// backend is sensitive to control flow.
   663  	g3 := getg()
   664  	if g1 != g3 {
   665  		panic("g1 != g3")
   666  	}
   667  }
   668  
   669  const (
   670  	PageSize         = pageSize
   671  	PallocChunkPages = pallocChunkPages
   672  	PageAlloc64Bit   = pageAlloc64Bit
   673  	PallocSumBytes   = pallocSumBytes
   674  )
   675  
   676  // Expose pallocSum for testing.
   677  type PallocSum pallocSum
   678  
   679  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   680  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   681  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   682  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   683  
   684  // Expose pallocBits for testing.
   685  type PallocBits pallocBits
   686  
   687  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   688  	return (*pallocBits)(b).find(npages, searchIdx)
   689  }
   690  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   691  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   692  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   693  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   694  
   695  // SummarizeSlow is a slow but more obviously correct implementation
   696  // of (*pallocBits).summarize. Used for testing.
   697  func SummarizeSlow(b *PallocBits) PallocSum {
   698  	var start, max, end uint
   699  
   700  	const N = uint(len(b)) * 64
   701  	for start < N && (*pageBits)(b).get(start) == 0 {
   702  		start++
   703  	}
   704  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   705  		end++
   706  	}
   707  	run := uint(0)
   708  	for i := uint(0); i < N; i++ {
   709  		if (*pageBits)(b).get(i) == 0 {
   710  			run++
   711  		} else {
   712  			run = 0
   713  		}
   714  		if run > max {
   715  			max = run
   716  		}
   717  	}
   718  	return PackPallocSum(start, max, end)
   719  }
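
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): cross-check the fast summary against the
// slow reference implementation above.
//
//	var b runtime.PallocBits
//	b.AllocRange(0, 8) // mark the first 8 pages allocated
//	b.Free(4, 2)       // free two of them again
//	if got, want := b.Summarize(), runtime.SummarizeSlow(&b); got != want {
//		t.Errorf("Summarize() = %v, SummarizeSlow() = %v", got, want)
//	}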
   720  
   721  // Expose non-trivial helpers for testing.
   722  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   723  
    724  // DiffPallocBits returns the set of bit ranges where the two
    725  // PallocBits differ.
   726  func DiffPallocBits(a, b *PallocBits) []BitRange {
   727  	ba := (*pageBits)(a)
   728  	bb := (*pageBits)(b)
   729  
   730  	var d []BitRange
   731  	base, size := uint(0), uint(0)
   732  	for i := uint(0); i < uint(len(ba))*64; i++ {
   733  		if ba.get(i) != bb.get(i) {
   734  			if size == 0 {
   735  				base = i
   736  			}
   737  			size++
   738  		} else {
   739  			if size != 0 {
   740  				d = append(d, BitRange{base, size})
   741  			}
   742  			size = 0
   743  		}
   744  	}
   745  	if size != 0 {
   746  		d = append(d, BitRange{base, size})
   747  	}
   748  	return d
   749  }
   750  
   751  // StringifyPallocBits gets the bits in the bit range r from b,
   752  // and returns a string containing the bits as ASCII 0 and 1
   753  // characters.
   754  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   755  	str := ""
   756  	for j := r.I; j < r.I+r.N; j++ {
   757  		if (*pageBits)(b).get(j) != 0 {
   758  			str += "1"
   759  		} else {
   760  			str += "0"
   761  		}
   762  	}
   763  	return str
   764  }
   765  
   766  // Expose pallocData for testing.
   767  type PallocData pallocData
   768  
   769  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   770  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   771  }
   772  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   773  func (d *PallocData) ScavengedSetRange(i, n uint) {
   774  	(*pallocData)(d).scavenged.setRange(i, n)
   775  }
   776  func (d *PallocData) PallocBits() *PallocBits {
   777  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   778  }
   779  func (d *PallocData) Scavenged() *PallocBits {
   780  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   781  }
   782  
   783  // Expose fillAligned for testing.
   784  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   785  
   786  // Expose pageCache for testing.
   787  type PageCache pageCache
   788  
   789  const PageCachePages = pageCachePages
   790  
   791  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   792  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   793  }
   794  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   795  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   796  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   797  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   798  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   799  	return (*pageCache)(c).alloc(npages)
   800  }
   801  func (c *PageCache) Flush(s *PageAlloc) {
   802  	cp := (*pageCache)(c)
   803  	sp := (*pageAlloc)(s)
   804  
   805  	systemstack(func() {
   806  		// None of the tests need any higher-level locking, so we just
   807  		// take the lock internally.
   808  		lock(sp.mheapLock)
   809  		cp.flush(sp)
   810  		unlock(sp.mheapLock)
   811  	})
   812  }
   813  
   814  // Expose chunk index type.
   815  type ChunkIdx chunkIdx
   816  
    817  // Expose pageAlloc for testing. Note that because pageAlloc is
    818  // not in the heap, neither is PageAlloc.
   819  type PageAlloc pageAlloc
   820  
   821  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   822  	pp := (*pageAlloc)(p)
   823  
   824  	var addr, scav uintptr
   825  	systemstack(func() {
   826  		// None of the tests need any higher-level locking, so we just
   827  		// take the lock internally.
   828  		lock(pp.mheapLock)
   829  		addr, scav = pp.alloc(npages)
   830  		unlock(pp.mheapLock)
   831  	})
   832  	return addr, scav
   833  }
   834  func (p *PageAlloc) AllocToCache() PageCache {
   835  	pp := (*pageAlloc)(p)
   836  
   837  	var c PageCache
   838  	systemstack(func() {
   839  		// None of the tests need any higher-level locking, so we just
   840  		// take the lock internally.
   841  		lock(pp.mheapLock)
   842  		c = PageCache(pp.allocToCache())
   843  		unlock(pp.mheapLock)
   844  	})
   845  	return c
   846  }
   847  func (p *PageAlloc) Free(base, npages uintptr) {
   848  	pp := (*pageAlloc)(p)
   849  
   850  	systemstack(func() {
   851  		// None of the tests need any higher-level locking, so we just
   852  		// take the lock internally.
   853  		lock(pp.mheapLock)
   854  		pp.free(base, npages)
   855  		unlock(pp.mheapLock)
   856  	})
   857  }
   858  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   859  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   860  }
   861  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   862  	pp := (*pageAlloc)(p)
   863  	systemstack(func() {
   864  		r = pp.scavenge(nbytes, nil, true)
   865  	})
   866  	return
   867  }
   868  func (p *PageAlloc) InUse() []AddrRange {
   869  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   870  	for _, r := range p.inUse.ranges {
   871  		ranges = append(ranges, AddrRange{r})
   872  	}
   873  	return ranges
   874  }
   875  
    876  // PallocData returns the chunk's PallocData, or nil if its L2 is missing.
   877  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   878  	ci := chunkIdx(i)
   879  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   880  }
   881  
   882  // AddrRange is a wrapper around addrRange for testing.
   883  type AddrRange struct {
   884  	addrRange
   885  }
   886  
   887  // MakeAddrRange creates a new address range.
   888  func MakeAddrRange(base, limit uintptr) AddrRange {
   889  	return AddrRange{makeAddrRange(base, limit)}
   890  }
   891  
   892  // Base returns the virtual base address of the address range.
   893  func (a AddrRange) Base() uintptr {
   894  	return a.addrRange.base.addr()
   895  }
   896  
    897  // Limit returns the virtual address of the limit of the address range.
   898  func (a AddrRange) Limit() uintptr {
   899  	return a.addrRange.limit.addr()
   900  }
   901  
   902  // Equals returns true if the two address ranges are exactly equal.
   903  func (a AddrRange) Equals(b AddrRange) bool {
   904  	return a == b
   905  }
   906  
   907  // Size returns the size in bytes of the address range.
   908  func (a AddrRange) Size() uintptr {
   909  	return a.addrRange.size()
   910  }
   911  
   912  // testSysStat is the sysStat passed to test versions of various
   913  // runtime structures. We do actually have to keep track of this
   914  // because otherwise memstats.mappedReady won't actually line up
   915  // with other stats in the runtime during tests.
   916  var testSysStat = &memstats.other_sys
   917  
   918  // AddrRanges is a wrapper around addrRanges for testing.
   919  type AddrRanges struct {
   920  	addrRanges
   921  	mutable bool
   922  }
   923  
   924  // NewAddrRanges creates a new empty addrRanges.
   925  //
   926  // Note that this initializes addrRanges just like in the
   927  // runtime, so its memory is persistentalloc'd. Call this
   928  // function sparingly since the memory it allocates is
   929  // leaked.
   930  //
   931  // This AddrRanges is mutable, so we can test methods like
   932  // Add.
   933  func NewAddrRanges() AddrRanges {
   934  	r := addrRanges{}
   935  	r.init(testSysStat)
   936  	return AddrRanges{r, true}
   937  }
   938  
   939  // MakeAddrRanges creates a new addrRanges populated with
   940  // the ranges in a.
   941  //
   942  // The returned AddrRanges is immutable, so methods like
   943  // Add will fail.
   944  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   945  	// Methods that manipulate the backing store of addrRanges.ranges should
   946  	// not be used on the result from this function (e.g. add) since they may
   947  	// trigger reallocation. That would normally be fine, except the new
   948  	// backing store won't come from the heap, but from persistentalloc, so
   949  	// we'll leak some memory implicitly.
   950  	ranges := make([]addrRange, 0, len(a))
   951  	total := uintptr(0)
   952  	for _, r := range a {
   953  		ranges = append(ranges, r.addrRange)
   954  		total += r.Size()
   955  	}
   956  	return AddrRanges{addrRanges{
   957  		ranges:     ranges,
   958  		totalBytes: total,
   959  		sysStat:    testSysStat,
   960  	}, false}
   961  }
   962  
   963  // Ranges returns a copy of the ranges described by the
   964  // addrRanges.
   965  func (a *AddrRanges) Ranges() []AddrRange {
   966  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   967  	for _, r := range a.addrRanges.ranges {
   968  		result = append(result, AddrRange{r})
   969  	}
   970  	return result
   971  }
   972  
   973  // FindSucc returns the successor to base. See addrRanges.findSucc
   974  // for more details.
   975  func (a *AddrRanges) FindSucc(base uintptr) int {
   976  	return a.findSucc(base)
   977  }
   978  
   979  // Add adds a new AddrRange to the AddrRanges.
   980  //
   981  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
   982  // otherwise this method will throw.
   983  func (a *AddrRanges) Add(r AddrRange) {
   984  	if !a.mutable {
   985  		throw("attempt to mutate immutable AddrRanges")
   986  	}
   987  	a.add(r.addrRange)
   988  }
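
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t; the addresses are arbitrary): only an
// AddrRanges built with NewAddrRanges may be mutated, while MakeAddrRanges
// yields an immutable snapshot.
//
//	a := runtime.NewAddrRanges()
//	a.Add(runtime.MakeAddrRange(0x1000, 0x2000))
//	a.Add(runtime.MakeAddrRange(0x3000, 0x4000))
//	if got := a.TotalBytes(); got != 0x2000 {
//		t.Errorf("TotalBytes = %#x, want 0x2000", got)
//	}
//	ro := runtime.MakeAddrRanges(a.Ranges()...) // immutable; calling ro.Add would throw
//	_ = ro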
   989  
   990  // TotalBytes returns the totalBytes field of the addrRanges.
   991  func (a *AddrRanges) TotalBytes() uintptr {
   992  	return a.addrRanges.totalBytes
   993  }
   994  
   995  // BitRange represents a range over a bitmap.
   996  type BitRange struct {
   997  	I, N uint // bit index and length in bits
   998  }
   999  
  1000  // NewPageAlloc creates a new page allocator for testing and
  1001  // initializes it with the scav and chunks maps. Each key in these maps
  1002  // represents a chunk index and each value is a series of bit ranges to
   1003  // set within that chunk's bitmap.
  1004  //
  1005  // The initialization of the pageAlloc preserves the invariant that if a
  1006  // scavenged bit is set the alloc bit is necessarily unset, so some
  1007  // of the bits described by scav may be cleared in the final bitmap if
  1008  // ranges in chunks overlap with them.
  1009  //
  1010  // scav is optional, and if nil, the scavenged bitmap will be cleared
  1011  // (as opposed to all 1s, which it usually is). Furthermore, every
  1012  // chunk index in scav must appear in chunks; ones that do not are
  1013  // ignored.
  1014  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
  1015  	p := new(pageAlloc)
  1016  
  1017  	// We've got an entry, so initialize the pageAlloc.
  1018  	p.init(new(mutex), testSysStat, true)
  1019  	lockInit(p.mheapLock, lockRankMheap)
  1020  	for i, init := range chunks {
  1021  		addr := chunkBase(chunkIdx(i))
  1022  
  1023  		// Mark the chunk's existence in the pageAlloc.
  1024  		systemstack(func() {
  1025  			lock(p.mheapLock)
  1026  			p.grow(addr, pallocChunkBytes)
  1027  			unlock(p.mheapLock)
  1028  		})
  1029  
  1030  		// Initialize the bitmap and update pageAlloc metadata.
  1031  		ci := chunkIndex(addr)
  1032  		chunk := p.chunkOf(ci)
  1033  
   1034  		// Clear all the scavenged bits, which grow sets.
  1035  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1036  
   1037  		// Simulate the allocation and subsequent free of all pages in
   1038  		// the chunk for the scavenge index. This leaves the index in a state
   1039  		// equivalent to all pages within the chunk being free.
  1040  		p.scav.index.alloc(ci, pallocChunkPages)
  1041  		p.scav.index.free(ci, 0, pallocChunkPages)
  1042  
  1043  		// Apply scavenge state if applicable.
  1044  		if scav != nil {
  1045  			if scvg, ok := scav[i]; ok {
  1046  				for _, s := range scvg {
  1047  					// Ignore the case of s.N == 0. setRange doesn't handle
  1048  					// it and it's a no-op anyway.
  1049  					if s.N != 0 {
  1050  						chunk.scavenged.setRange(s.I, s.N)
  1051  					}
  1052  				}
  1053  			}
  1054  		}
  1055  
  1056  		// Apply alloc state.
  1057  		for _, s := range init {
  1058  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1059  			// it and it's a no-op anyway.
  1060  			if s.N != 0 {
  1061  				chunk.allocRange(s.I, s.N)
  1062  
  1063  				// Make sure the scavenge index is updated.
  1064  				p.scav.index.alloc(ci, s.N)
  1065  			}
  1066  		}
  1067  
  1068  		// Update heap metadata for the allocRange calls above.
  1069  		systemstack(func() {
  1070  			lock(p.mheapLock)
  1071  			p.update(addr, pallocChunkPages, false, false)
  1072  			unlock(p.mheapLock)
  1073  		})
  1074  	}
  1075  
  1076  	return (*PageAlloc)(p)
  1077  }
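
// A minimal usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): build a one-chunk allocator whose second
// half is already allocated, allocate one page from the free half, and release
// the OS resources when done.
//
//	b := runtime.BaseChunkIdx
//	p := runtime.NewPageAlloc(map[runtime.ChunkIdx][]runtime.BitRange{
//		b: {{runtime.PallocChunkPages / 2, runtime.PallocChunkPages / 2}},
//	}, nil)
//	defer runtime.FreePageAlloc(p)
//	addr, _ := p.Alloc(1)
//	if want := runtime.PageBase(b, 0); addr != want {
//		t.Fatalf("Alloc(1) = %#x, want %#x", addr, want)
//	}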
  1078  
  1079  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1080  // is called the pageAlloc may no longer be used. The object itself will be
  1081  // collected by the garbage collector once it is no longer live.
  1082  func FreePageAlloc(pp *PageAlloc) {
  1083  	p := (*pageAlloc)(pp)
  1084  
  1085  	// Free all the mapped space for the summary levels.
  1086  	if pageAlloc64Bit != 0 {
  1087  		for l := 0; l < summaryLevels; l++ {
  1088  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1089  		}
  1090  	} else {
  1091  		resSize := uintptr(0)
  1092  		for _, s := range p.summary {
  1093  			resSize += uintptr(cap(s)) * pallocSumBytes
  1094  		}
  1095  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1096  	}
  1097  
  1098  	// Free extra data structures.
  1099  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1100  
  1101  	// Subtract back out whatever we mapped for the summaries.
  1102  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1103  	// (and in anger should actually be accounted for), and there's no other
  1104  	// way to figure out how much we actually mapped.
  1105  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1106  	testSysStat.add(-int64(p.summaryMappedReady))
  1107  
  1108  	// Free the mapped space for chunks.
  1109  	for i := range p.chunks {
  1110  		if x := p.chunks[i]; x != nil {
  1111  			p.chunks[i] = nil
  1112  			// This memory comes from sysAlloc and will always be page-aligned.
  1113  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1114  		}
  1115  	}
  1116  }
  1117  
  1118  // BaseChunkIdx is a convenient chunkIdx value which works on both
   1119  // 64-bit and 32-bit platforms, allowing the tests to share code
  1120  // between the two.
  1121  //
  1122  // This should not be higher than 0x100*pallocChunkBytes to support
  1123  // mips and mipsle, which only have 31-bit address spaces.
  1124  var BaseChunkIdx = func() ChunkIdx {
  1125  	var prefix uintptr
  1126  	if pageAlloc64Bit != 0 {
  1127  		prefix = 0xc000
  1128  	} else {
  1129  		prefix = 0x100
  1130  	}
  1131  	baseAddr := prefix * pallocChunkBytes
  1132  	if goos.IsAix != 0 {
  1133  		baseAddr += arenaBaseOffset
  1134  	}
  1135  	return ChunkIdx(chunkIndex(baseAddr))
  1136  }()
  1137  
  1138  // PageBase returns an address given a chunk index and a page index
  1139  // relative to that chunk.
  1140  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1141  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1142  }
  1143  
  1144  type BitsMismatch struct {
  1145  	Base      uintptr
  1146  	Got, Want uint64
  1147  }
  1148  
  1149  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1150  	ok = true
  1151  
  1152  	// Run on the system stack to avoid stack growth allocation.
  1153  	systemstack(func() {
  1154  		getg().m.mallocing++
  1155  
  1156  		// Lock so that we can safely access the bitmap.
  1157  		lock(&mheap_.lock)
  1158  	chunkLoop:
  1159  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1160  			chunk := mheap_.pages.tryChunkOf(i)
  1161  			if chunk == nil {
  1162  				continue
  1163  			}
  1164  			for j := 0; j < pallocChunkPages/64; j++ {
  1165  				// Run over each 64-bit bitmap section and ensure
  1166  				// scavenged is being cleared properly on allocation.
  1167  				// If a used bit and scavenged bit are both set, that's
  1168  				// an error, and could indicate a larger problem, or
  1169  				// an accounting problem.
  1170  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1171  				got := chunk.scavenged[j]
  1172  				if want != got {
  1173  					ok = false
  1174  					if n >= len(mismatches) {
  1175  						break chunkLoop
  1176  					}
  1177  					mismatches[n] = BitsMismatch{
  1178  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
  1179  						Got:  got,
  1180  						Want: want,
  1181  					}
  1182  					n++
  1183  				}
  1184  			}
  1185  		}
  1186  		unlock(&mheap_.lock)
  1187  
  1188  		getg().m.mallocing--
  1189  	})
  1190  	return
  1191  }
  1192  
  1193  func PageCachePagesLeaked() (leaked uintptr) {
  1194  	stopTheWorld("PageCachePagesLeaked")
  1195  
  1196  	// Walk over destroyed Ps and look for unflushed caches.
  1197  	deadp := allp[len(allp):cap(allp)]
  1198  	for _, p := range deadp {
  1199  		// Since we're going past len(allp) we may see nil Ps.
  1200  		// Just ignore them.
  1201  		if p != nil {
  1202  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1203  		}
  1204  	}
  1205  
  1206  	startTheWorld()
  1207  	return
  1208  }
  1209  
  1210  var Semacquire = semacquire
  1211  var Semrelease1 = semrelease1
  1212  
  1213  func SemNwait(addr *uint32) uint32 {
  1214  	root := semtable.rootFor(addr)
  1215  	return root.nwait.Load()
  1216  }
  1217  
  1218  const SemTableSize = semTabSize
  1219  
  1220  // SemTable is a wrapper around semTable exported for testing.
  1221  type SemTable struct {
  1222  	semTable
  1223  }
  1224  
  1225  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1226  func (t *SemTable) Enqueue(addr *uint32) {
  1227  	s := acquireSudog()
  1228  	s.releasetime = 0
  1229  	s.acquiretime = 0
  1230  	s.ticket = 0
  1231  	t.semTable.rootFor(addr).queue(addr, s, false)
  1232  }
  1233  
  1234  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1235  //
  1236  // Returns true if there actually was a waiter to be dequeued.
  1237  func (t *SemTable) Dequeue(addr *uint32) bool {
  1238  	s, _ := t.semTable.rootFor(addr).dequeue(addr)
  1239  	if s != nil {
  1240  		releaseSudog(s)
  1241  		return true
  1242  	}
  1243  	return false
  1244  }
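
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): Dequeue only reports true while a waiter
// enqueued for the same address is still queued.
//
//	var tab runtime.SemTable
//	var sema uint32
//	tab.Enqueue(&sema)
//	if !tab.Dequeue(&sema) {
//		t.Fatal("expected a queued waiter")
//	}
//	if tab.Dequeue(&sema) {
//		t.Fatal("expected the queue to be empty")
//	}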
  1245  
  1246  // mspan wrapper for testing.
  1247  type MSpan mspan
  1248  
  1249  // Allocate an mspan for testing.
  1250  func AllocMSpan() *MSpan {
  1251  	var s *mspan
  1252  	systemstack(func() {
  1253  		lock(&mheap_.lock)
  1254  		s = (*mspan)(mheap_.spanalloc.alloc())
  1255  		unlock(&mheap_.lock)
  1256  	})
  1257  	return (*MSpan)(s)
  1258  }
  1259  
  1260  // Free an allocated mspan.
  1261  func FreeMSpan(s *MSpan) {
  1262  	systemstack(func() {
  1263  		lock(&mheap_.lock)
  1264  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1265  		unlock(&mheap_.lock)
  1266  	})
  1267  }
  1268  
  1269  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1270  	s := (*mspan)(ms)
  1271  	s.nelems = uintptr(len(bits) * 8)
  1272  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1273  	result := s.countAlloc()
  1274  	s.gcmarkBits = nil
  1275  	return result
  1276  }
  1277  
  1278  const (
  1279  	TimeHistSubBucketBits = timeHistSubBucketBits
  1280  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1281  	TimeHistNumBuckets    = timeHistNumBuckets
  1282  	TimeHistMinBucketBits = timeHistMinBucketBits
  1283  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1284  )
  1285  
  1286  type TimeHistogram timeHistogram
  1287  
   1288  // Count returns the count for the given bucket, subBucket indices.
   1289  // Returns true if the bucket was valid, otherwise returns the count
  1290  // for the overflow bucket if bucket > 0 or the underflow bucket if
  1291  // bucket < 0, and false.
  1292  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1293  	t := (*timeHistogram)(th)
  1294  	if bucket < 0 {
  1295  		return t.underflow.Load(), false
  1296  	}
  1297  	i := bucket*TimeHistNumSubBuckets + subBucket
  1298  	if i >= len(t.counts) {
  1299  		return t.overflow.Load(), false
  1300  	}
  1301  	return t.counts[i].Load(), true
  1302  }
  1303  
  1304  func (th *TimeHistogram) Record(duration int64) {
  1305  	(*timeHistogram)(th).record(duration)
  1306  }
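
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): record a duration, and note that
// out-of-range bucket indices report the under/overflow counts with ok=false.
//
//	var h runtime.TimeHistogram
//	h.Record(1e6) // 1ms, in nanoseconds
//	if _, ok := h.Count(-1, 0); ok {
//		t.Error("negative bucket index unexpectedly reported as valid")
//	}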
  1307  
  1308  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1309  
  1310  func SetIntArgRegs(a int) int {
  1311  	lock(&finlock)
  1312  	old := intArgRegs
  1313  	if a >= 0 {
  1314  		intArgRegs = a
  1315  	}
  1316  	unlock(&finlock)
  1317  	return old
  1318  }
  1319  
  1320  func FinalizerGAsleep() bool {
  1321  	return fingStatus.Load()&fingWait != 0
  1322  }
  1323  
  1324  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1325  // extra layer of call, since then there's a return before the "real"
  1326  // next call.
  1327  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1328  
  1329  // For GCTestIsReachable, it's important that we do this as a call so
  1330  // escape analysis can see through it.
  1331  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1332  	return gcTestIsReachable(ptrs...)
  1333  }
  1334  
  1335  // For GCTestPointerClass, it's important that we do this as a call so
  1336  // escape analysis can see through it.
  1337  //
  1338  // This is nosplit because gcTestPointerClass is.
  1339  //
  1340  //go:nosplit
  1341  func GCTestPointerClass(p unsafe.Pointer) string {
  1342  	return gcTestPointerClass(p)
  1343  }
  1344  
  1345  const Raceenabled = raceenabled
  1346  
  1347  const (
  1348  	GCBackgroundUtilization     = gcBackgroundUtilization
  1349  	GCGoalUtilization           = gcGoalUtilization
  1350  	DefaultHeapMinimum          = defaultHeapMinimum
  1351  	MemoryLimitHeapGoalHeadroom = memoryLimitHeapGoalHeadroom
  1352  )
  1353  
  1354  type GCController struct {
  1355  	gcControllerState
  1356  }
  1357  
  1358  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1359  	// Force the controller to escape. We're going to
  1360  	// do 64-bit atomics on it, and if it gets stack-allocated
  1361  	// on a 32-bit architecture, it may get allocated unaligned
  1362  	// space.
  1363  	g := Escape(new(GCController))
  1364  	g.gcControllerState.test = true // Mark it as a test copy.
  1365  	g.init(int32(gcPercent), memoryLimit)
  1366  	return g
  1367  }
  1368  
  1369  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1370  	trigger, _ := c.trigger()
  1371  	if c.heapMarked > trigger {
  1372  		trigger = c.heapMarked
  1373  	}
  1374  	c.maxStackScan.Store(stackSize)
  1375  	c.globalsScan.Store(globalsSize)
  1376  	c.heapLive.Store(trigger)
  1377  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1378  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1379  }
  1380  
  1381  func (c *GCController) AssistWorkPerByte() float64 {
  1382  	return c.assistWorkPerByte.Load()
  1383  }
  1384  
  1385  func (c *GCController) HeapGoal() uint64 {
  1386  	return c.heapGoal()
  1387  }
  1388  
  1389  func (c *GCController) HeapLive() uint64 {
  1390  	return c.heapLive.Load()
  1391  }
  1392  
  1393  func (c *GCController) HeapMarked() uint64 {
  1394  	return c.heapMarked
  1395  }
  1396  
  1397  func (c *GCController) Triggered() uint64 {
  1398  	return c.triggered
  1399  }
  1400  
  1401  type GCControllerReviseDelta struct {
  1402  	HeapLive        int64
  1403  	HeapScan        int64
  1404  	HeapScanWork    int64
  1405  	StackScanWork   int64
  1406  	GlobalsScanWork int64
  1407  }
  1408  
  1409  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1410  	c.heapLive.Add(d.HeapLive)
  1411  	c.heapScan.Add(d.HeapScan)
  1412  	c.heapScanWork.Add(d.HeapScanWork)
  1413  	c.stackScanWork.Add(d.StackScanWork)
  1414  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1415  	c.revise()
  1416  }
  1417  
  1418  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1419  	c.assistTime.Store(assistTime)
  1420  	c.endCycle(elapsed, gomaxprocs, false)
  1421  	c.resetLive(bytesMarked)
  1422  	c.commit(false)
  1423  }
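
// A rough usage sketch (hypothetical test code in package runtime_test; the
// sizes and times are arbitrary): drive one simulated GC cycle through the
// exported wrapper and read back the resulting heap goal.
//
//	c := runtime.NewGCController(100, 1<<62 /* effectively no memory limit */)
//	c.StartCycle(1<<20, 1<<20, 0.5, 4)
//	c.Revise(runtime.GCControllerReviseDelta{HeapLive: 1 << 20, HeapScanWork: 1 << 19})
//	c.EndCycle(c.HeapLive(), 0, 1e9, 4)
//	goal := c.HeapGoal()
//	_ = goal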
  1424  
  1425  func (c *GCController) AddIdleMarkWorker() bool {
  1426  	return c.addIdleMarkWorker()
  1427  }
  1428  
  1429  func (c *GCController) NeedIdleMarkWorker() bool {
  1430  	return c.needIdleMarkWorker()
  1431  }
  1432  
  1433  func (c *GCController) RemoveIdleMarkWorker() {
  1434  	c.removeIdleMarkWorker()
  1435  }
  1436  
  1437  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1438  	c.setMaxIdleMarkWorkers(max)
  1439  }
  1440  
  1441  var alwaysFalse bool
  1442  var escapeSink any
  1443  
  1444  func Escape[T any](x T) T {
  1445  	if alwaysFalse {
  1446  		escapeSink = x
  1447  	}
  1448  	return x
  1449  }
  1450  
  1451  // Acquirem blocks preemption.
  1452  func Acquirem() {
  1453  	acquirem()
  1454  }
  1455  
  1456  func Releasem() {
  1457  	releasem(getg().m)
  1458  }
  1459  
  1460  var Timediv = timediv
  1461  
  1462  type PIController struct {
  1463  	piController
  1464  }
  1465  
  1466  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1467  	return &PIController{piController{
  1468  		kp:  kp,
  1469  		ti:  ti,
  1470  		tt:  tt,
  1471  		min: min,
  1472  		max: max,
  1473  	}}
  1474  }
  1475  
  1476  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1477  	return c.piController.next(input, setpoint, period)
  1478  }
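
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t; the gains and bounds are arbitrary): step
// the controller once and check that its output stays within [min, max].
//
//	c := runtime.NewPIController(0.5, 1e6, 1e6, 0, 100)
//	out, ok := c.Next(10, 20, 1e6)
//	if out < 0 || out > 100 {
//		t.Errorf("Next() = %f, want a value in [0, 100]", out)
//	}
//	_ = ok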
  1479  
  1480  const (
  1481  	CapacityPerProc          = capacityPerProc
  1482  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1483  )
  1484  
  1485  type GCCPULimiter struct {
  1486  	limiter gcCPULimiterState
  1487  }
  1488  
  1489  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1490  	// Force the controller to escape. We're going to
  1491  	// do 64-bit atomics on it, and if it gets stack-allocated
  1492  	// on a 32-bit architecture, it may get allocated unaligned
  1493  	// space.
  1494  	l := Escape(new(GCCPULimiter))
  1495  	l.limiter.test = true
  1496  	l.limiter.resetCapacity(now, gomaxprocs)
  1497  	return l
  1498  }
  1499  
  1500  func (l *GCCPULimiter) Fill() uint64 {
  1501  	return l.limiter.bucket.fill
  1502  }
  1503  
  1504  func (l *GCCPULimiter) Capacity() uint64 {
  1505  	return l.limiter.bucket.capacity
  1506  }
  1507  
  1508  func (l *GCCPULimiter) Overflow() uint64 {
  1509  	return l.limiter.overflow
  1510  }
  1511  
  1512  func (l *GCCPULimiter) Limiting() bool {
  1513  	return l.limiter.limiting()
  1514  }
  1515  
  1516  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1517  	return l.limiter.needUpdate(now)
  1518  }
  1519  
  1520  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1521  	l.limiter.startGCTransition(enableGC, now)
  1522  }
  1523  
  1524  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1525  	l.limiter.finishGCTransition(now)
  1526  }
  1527  
  1528  func (l *GCCPULimiter) Update(now int64) {
  1529  	l.limiter.update(now)
  1530  }
  1531  
  1532  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1533  	l.limiter.addAssistTime(t)
  1534  }
  1535  
  1536  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1537  	l.limiter.resetCapacity(now, nprocs)
  1538  }
  1539  
  1540  const ScavengePercent = scavengePercent
  1541  
  1542  type Scavenger struct {
  1543  	Sleep      func(int64) int64
  1544  	Scavenge   func(uintptr) (uintptr, int64)
  1545  	ShouldStop func() bool
  1546  	GoMaxProcs func() int32
  1547  
  1548  	released  atomic.Uintptr
  1549  	scavenger scavengerState
  1550  	stop      chan<- struct{}
  1551  	done      <-chan struct{}
  1552  }
  1553  
  1554  func (s *Scavenger) Start() {
  1555  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1556  		panic("must populate all stubs")
  1557  	}
  1558  
  1559  	// Install hooks.
  1560  	s.scavenger.sleepStub = s.Sleep
  1561  	s.scavenger.scavenge = s.Scavenge
  1562  	s.scavenger.shouldStop = s.ShouldStop
  1563  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1564  
  1565  	// Start up scavenger goroutine, and wait for it to be ready.
  1566  	stop := make(chan struct{})
  1567  	s.stop = stop
  1568  	done := make(chan struct{})
  1569  	s.done = done
  1570  	go func() {
  1571  		// This should match bgscavenge, loosely.
  1572  		s.scavenger.init()
  1573  		s.scavenger.park()
  1574  		for {
  1575  			select {
  1576  			case <-stop:
  1577  				close(done)
  1578  				return
  1579  			default:
  1580  			}
  1581  			released, workTime := s.scavenger.run()
  1582  			if released == 0 {
  1583  				s.scavenger.park()
  1584  				continue
  1585  			}
  1586  			s.released.Add(released)
  1587  			s.scavenger.sleep(workTime)
  1588  		}
  1589  	}()
  1590  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1591  		panic("timed out waiting for scavenger to get ready")
  1592  	}
  1593  }
  1594  
  1595  // BlockUntilParked blocks until the scavenger parks, or until
  1596  // timeout is exceeded. Returns true if the scavenger parked.
  1597  //
  1598  // Note that in testing, parked means something slightly different.
  1599  // In anger, the scavenger parks to sleep, too, but in testing,
  1600  // it only parks when it actually has no work to do.
  1601  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1602  	// Just spin, waiting for it to park.
  1603  	//
  1604  	// The actual parking process is racy with respect to
  1605  	// wakeups, which is fine, but for testing we need something
  1606  	// a bit more robust.
  1607  	start := nanotime()
  1608  	for nanotime()-start < timeout {
  1609  		lock(&s.scavenger.lock)
  1610  		parked := s.scavenger.parked
  1611  		unlock(&s.scavenger.lock)
  1612  		if parked {
  1613  			return true
  1614  		}
  1615  		Gosched()
  1616  	}
  1617  	return false
  1618  }
  1619  
  1620  // Released returns how many bytes the scavenger released.
  1621  func (s *Scavenger) Released() uintptr {
  1622  	return s.released.Load()
  1623  }
  1624  
  1625  // Wake wakes up a parked scavenger to keep running.
  1626  func (s *Scavenger) Wake() {
  1627  	s.scavenger.wake()
  1628  }
  1629  
  1630  // Stop cleans up the scavenger's resources. The scavenger
  1631  // must be parked for this to work.
  1632  func (s *Scavenger) Stop() {
  1633  	lock(&s.scavenger.lock)
  1634  	parked := s.scavenger.parked
  1635  	unlock(&s.scavenger.lock)
  1636  	if !parked {
  1637  		panic("tried to clean up scavenger that is not parked")
  1638  	}
  1639  	close(s.stop)
  1640  	s.Wake()
  1641  	<-s.done
  1642  }
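
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): stub out all four hooks, start the
// scavenger, make sure it parks when there is no work, and shut it down.
//
//	var s runtime.Scavenger
//	s.Sleep = func(d int64) int64 { return d }
//	s.Scavenge = func(n uintptr) (uintptr, int64) { return 0, 0 } // no work available
//	s.ShouldStop = func() bool { return true }
//	s.GoMaxProcs = func() int32 { return 1 }
//	s.Start()
//	s.Wake()
//	if !s.BlockUntilParked(1e9 /* 1 second */) {
//		t.Fatal("scavenger did not park")
//	}
//	s.Stop()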
  1643  
  1644  type ScavengeIndex struct {
  1645  	i scavengeIndex
  1646  }
  1647  
  1648  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1649  	s := new(ScavengeIndex)
  1650  	// This is a bit lazy but we easily guarantee we'll be able
  1651  	// to reference all the relevant chunks. The worst-case
  1652  	// memory usage here is 512 MiB, but tests generally use
  1653  	// small offsets from BaseChunkIdx, which results in ~100s
  1654  	// of KiB in memory use.
  1655  	//
  1656  	// This may still be worth making better, at least by sharing
  1657  	// this fairly large array across calls with a sync.Pool or
  1658  	// something. Currently, when the tests are run serially,
  1659  	// it takes around 0.5s. Not all that much, but if we have
  1660  	// a lot of tests like this it could add up.
  1661  	s.i.chunks = make([]atomicScavChunkData, max)
  1662  	s.i.min.Store(uintptr(min))
  1663  	s.i.max.Store(uintptr(max))
  1664  	s.i.minHeapIdx.Store(uintptr(min))
  1665  	s.i.test = true
  1666  	return s
  1667  }
  1668  
  1669  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1670  	ci, off := s.i.find(force)
  1671  	return ChunkIdx(ci), off
  1672  }
  1673  
  1674  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1675  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1676  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1677  
  1678  	if sc == ec {
  1679  		// The range doesn't cross any chunk boundaries.
  1680  		s.i.alloc(sc, ei+1-si)
  1681  	} else {
  1682  		// The range crosses at least one chunk boundary.
  1683  		s.i.alloc(sc, pallocChunkPages-si)
  1684  		for c := sc + 1; c < ec; c++ {
  1685  			s.i.alloc(c, pallocChunkPages)
  1686  		}
  1687  		s.i.alloc(ec, ei+1)
  1688  	}
  1689  }
  1690  
  1691  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1692  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1693  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1694  
  1695  	if sc == ec {
  1696  		// The range doesn't cross any chunk boundaries.
  1697  		s.i.free(sc, si, ei+1-si)
  1698  	} else {
  1699  		// The range crosses at least one chunk boundary.
  1700  		s.i.free(sc, si, pallocChunkPages-si)
  1701  		for c := sc + 1; c < ec; c++ {
  1702  			s.i.free(c, 0, pallocChunkPages)
  1703  		}
  1704  		s.i.free(ec, 0, ei+1)
  1705  	}
  1706  }
  1707  
  1708  func (s *ScavengeIndex) ResetSearchAddrs() {
  1709  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1710  		addr, marked := a.Load()
  1711  		if marked {
  1712  			a.StoreUnmark(addr, addr)
  1713  		}
  1714  		a.Clear()
  1715  	}
  1716  	s.i.freeHWM = minOffAddr
  1717  }
  1718  
  1719  func (s *ScavengeIndex) NextGen() {
  1720  	s.i.nextGen()
  1721  }
  1722  
  1723  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1724  	s.i.setEmpty(chunkIdx(ci))
  1725  }
  1726  
  1727  func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) bool {
  1728  	return s.i.setNoHugePage(chunkIdx(ci))
  1729  }
  1730  
  1731  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1732  	sc0 := scavChunkData{
  1733  		gen:            gen,
  1734  		inUse:          inUse,
  1735  		lastInUse:      lastInUse,
  1736  		scavChunkFlags: scavChunkFlags(flags),
  1737  	}
  1738  	scp := sc0.pack()
  1739  	sc1 := unpackScavChunkData(scp)
  1740  	return sc0 == sc1
  1741  }
  1742  
  1743  const GTrackingPeriod = gTrackingPeriod
  1744  
  1745  var ZeroBase = unsafe.Pointer(&zerobase)
  1746  
  1747  const UserArenaChunkBytes = userArenaChunkBytes
  1748  
  1749  type UserArena struct {
  1750  	arena *userArena
  1751  }
  1752  
  1753  func NewUserArena() *UserArena {
  1754  	return &UserArena{newUserArena()}
  1755  }
  1756  
  1757  func (a *UserArena) New(out *any) {
  1758  	i := efaceOf(out)
  1759  	typ := i._type
  1760  	if typ.Kind_&kindMask != kindPtr {
  1761  		panic("new result of non-ptr type")
  1762  	}
  1763  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1764  	i.data = a.arena.new(typ)
  1765  }
  1766  
  1767  func (a *UserArena) Slice(sl any, cap int) {
  1768  	a.arena.slice(sl, cap)
  1769  }
  1770  
  1771  func (a *UserArena) Free() {
  1772  	a.arena.free()
  1773  }
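
// A rough usage sketch (hypothetical test code in package runtime_test): carve
// a pointer and a slice out of a user arena via the test wrapper, then free
// the whole arena at once.
//
//	a := runtime.NewUserArena()
//	var x any = (*[16]byte)(nil)
//	a.New(&x) // x now holds a *[16]byte backed by arena memory
//	var sl []int
//	a.Slice(&sl, 100) // sl is backed by arena memory with capacity 100
//	runtime.KeepAlive(x)
//	runtime.KeepAlive(sl)
//	a.Free()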
  1774  
  1775  func GlobalWaitingArenaChunks() int {
  1776  	n := 0
  1777  	systemstack(func() {
  1778  		lock(&mheap_.lock)
  1779  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1780  			n++
  1781  		}
  1782  		unlock(&mheap_.lock)
  1783  	})
  1784  	return n
  1785  }
  1786  
  1787  func UserArenaClone[T any](s T) T {
  1788  	return arena_heapify(s).(T)
  1789  }
  1790  
  1791  var AlignUp = alignUp
  1792  
  1793  // BlockUntilEmptyFinalizerQueue blocks until either the finalizer
  1794  // queue is emptied (and the finalizers have executed) or the timeout
  1795  // is reached. Returns true if the finalizer queue was emptied.
  1796  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1797  	start := nanotime()
  1798  	for nanotime()-start < timeout {
  1799  		lock(&finlock)
  1800  		// We know the queue has been drained when both finq is nil
  1801  		// and the finalizer g has stopped executing.
  1802  		empty := finq == nil
  1803  		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
  1804  		unlock(&finlock)
  1805  		if empty {
  1806  			return true
  1807  		}
  1808  		Gosched()
  1809  	}
  1810  	return false
  1811  }
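
// A rough usage sketch (hypothetical test code in package runtime_test, with
// the enclosing *testing.T named t): drop the last reference to an object with
// a finalizer, trigger a GC, and wait for the queue to drain before asserting
// on the finalizer's side effects.
//
//	ran := make(chan struct{})
//	obj := new(int)
//	runtime.SetFinalizer(obj, func(*int) { close(ran) })
//	obj = nil
//	runtime.GC()
//	if !runtime.BlockUntilEmptyFinalizerQueue(5e9 /* 5 seconds */) {
//		t.Fatal("finalizer queue did not drain")
//	}
//	<-ran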
  1812  
  1813  func FrameStartLine(f *Frame) int {
  1814  	return f.startLine
  1815  }
  1816  
  1817  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1818  // This memory will never be freed; use sparingly.
  1819  func PersistentAlloc(n uintptr) unsafe.Pointer {
  1820  	return persistentalloc(n, 0, &memstats.other_sys)
  1821  }
  1822  
  1823  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1824  // pcBuf with the return addresses of the physical frames on the stack.
  1825  func FPCallers(pcBuf []uintptr) int {
  1826  	return fpTracebackPCs(unsafe.Pointer(getcallerfp()), pcBuf)
  1827  }