github.com/lovishpuri/go-40569/src@v0.0.0-20230519171745-f8623e7c56cf/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32
var ParseByteCount = parseByteCount

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var LockPartialOrder = lockPartialOrder

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}
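
// lfstackSketch is an illustrative sketch (not used by any test) of the
// push/pop protocol for the wrappers above. Nodes must live outside the
// GC heap, because the lock-free stack traffics in raw pointers the
// collector cannot see; PersistentAlloc (defined later in this file)
// provides such memory, and LFNodeValidate checks the property.
func lfstackSketch() bool {
	head := new(uint64)
	node := (*LFNode)(PersistentAlloc(unsafe.Sizeof(LFNode{})))
	LFNodeValidate(node)
	LFStackPush(head, node)
	return LFStackPop(head) == node && LFStackPop(head) == nil
}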

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x any) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

// For benchmarking.

func BenchSetType(n int, x any) {
	// Escape x to ensure it is allocated on the heap, as we are
	// working on the heap bits here.
	Escape(x)
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.Kind_ & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).Elem
		size = t.Size_
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).Elem
		size = t.Size_ * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse.Load())

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrand64() uint64        { return fastrand64() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stopTheWorld("ReadMetricsSlow")

	// Initialize the metrics beforehand because this could
	// allocate and skew the stats.
	metricsLock()
	initMetrics()
	metricsUnlock()

	systemstack(func() {
		// Read memstats first. It's going to flush
		// the mcaches which readMetrics does not do, so
		// going the other way around may result in
		// inconsistent statistics.
		readmemstats_m(memStats)
	})

	// Read metrics off the system stack.
	//
	// The only part of readMetrics that could allocate
	// and skew the stats is initMetrics.
	readMetrics(samplesp, len, cap)

	startTheWorld()
}

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by just reading the stats for those directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free stats.
		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += uint64(m.smallFreeCount[i])
			bySize[i].Frees += uint64(m.smallFreeCount[i])
			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
		}
		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
// and verifies that unwinding the new stack doesn't crash, even if the old
// stack has been freed or reused (simulated via poisoning).
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})
	// If our new stack contains frame pointers into the old stack, this will
	// crash because the old stack has been poisoned.
	FPCallers(make([]uintptr, 1024))
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

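// MapBucketsCount returns the number of buckets (2**B) currently
// allocated for m's hash table.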
func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
//
// This may fail to reserve memory. If it fails, it still returns the
// address range it attempted to reserve.
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize)
	ok = (addr == uintptr(got))
	if !ok {
		// We were unable to get the requested reservation.
		// Release what we did get and fail.
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func Goid() uint64 {
	return getg().goid
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := any(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't happen.

	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		if run > max {
			max = run
		}
	}
	return PackPallocSum(start, max, end)
}
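
// summarizeSketch is an illustrative sketch (not used by any test) that
// cross-checks the fast summarizer against SummarizeSlow on one simple
// bitmap, the way the real tests do over many generated inputs.
func summarizeSketch() bool {
	b := new(PallocBits)
	b.AllocRange(0, 8) // first 8 pages allocated, the rest free
	got, want := b.Summarize(), SummarizeSlow(b)
	return got.Start() == want.Start() && got.Max() == want.Max() && got.End() == want.End()
}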

// Expose non-trivial helpers for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
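
// findBitRange64Sketch is an illustrative sketch (not used by any
// test): findBitRange64 reports the bit index of the first run of n
// set bits in c, or a value >= 64 when no such run exists.
func findBitRange64Sketch() (uint, uint) {
	return FindBitRange64(0b1111_0000, 4), // expected 4: four set bits start at bit 4
		FindBitRange64(0b1010, 2) // expected >= 64: no run of two set bits
}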

// Given two PallocBits, returns a set of bit ranges where
// they differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}
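
// diffPallocBitsSketch is an illustrative sketch (not used by any
// test): two bitmaps that differ only in pages 4 through 6 diff to the
// single range {I: 4, N: 3}.
func diffPallocBitsSketch() []BitRange {
	a, b := new(PallocBits), new(PallocBits)
	b.AllocRange(4, 3)
	return DiffPallocBits(a, b) // expected: [{4, 3}]
}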

// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}
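
// pageCacheSketch is an illustrative sketch (not used by any test): a
// PageCache is a 64-page window over the heap. With every cache bit set
// and nothing scavenged, the first one-page allocation should return
// the window's base address and zero scavenged bytes.
func pageCacheSketch() bool {
	base := PageBase(BaseChunkIdx, 0)
	c := NewPageCache(base, ^uint64(0), 0)
	addr, scav := c.Alloc(1)
	return addr == base && scav == 0 && !c.Empty()
}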

// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, neither is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}
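
// addrRangeSketch is an illustrative sketch (not used by any test): an
// AddrRange is the half-open interval [Base, Limit), so its Size is
// the page count times the page size.
func addrRangeSketch() bool {
	r := MakeAddrRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 4))
	return r.Size() == 4*PageSize && r.Base() == PageBase(BaseChunkIdx, 0)
}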

// testSysStat is the sysStat passed to test versions of various
// runtime structures. We have to keep track of this because otherwise
// memstats.mappedReady won't line up with other stats in the runtime
// during tests.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// leaked.
//
// This AddrRanges is mutable, so we can test methods like
// Add.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods like
// Add will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// Methods that manipulate the backing store of addrRanges.ranges should
	// not be used on the result from this function (e.g. add) since they may
	// trigger reallocation. That would normally be fine, except the new
	// backing store won't come from the heap, but from persistentalloc, so
	// we'll leak some memory implicitly.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRanges must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Clear all the scavenged bits that grow set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Simulate the allocation and subsequent free of all pages in
		// the chunk for the scavenge index. This sets the state to be
		// equivalent to all pages within the index being free.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Make sure the scavenge index is updated.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called the pageAlloc may no longer be used. The object itself will be
// collected by the garbage collector once it is no longer live.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free extra data structures.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Subtract back out whatever we mapped for the summaries.
	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
	// (and in anger should actually be accounted for), and there's no other
	// way to figure out how much we actually mapped.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}
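
// newPageAllocSketch is an illustrative sketch (not used by any test):
// it builds a one-chunk page allocator with its first four pages
// already allocated, allocates the next page, and tears everything
// down again. FreePageAlloc must always be called, since the
// allocator's metadata is mapped outside the Go heap.
func newPageAllocSketch() {
	p := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {{0, 4}}, // pages 0 through 3 start out allocated
	}, nil)
	defer FreePageAlloc(p)

	addr, _ := p.Alloc(1) // expected: PageBase(BaseChunkIdx, 4)
	p.Free(addr, 1)
}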

// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64-bit and 32-bit platforms, allowing the tests to share code
// between the two.
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// Run over each 64-bit bitmap section and ensure
				// scavenged is being cleared properly on allocation.
				// If a used bit and scavenged bit are both set, that's
				// an error, and could indicate a larger problem, or
				// an accounting problem.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

// SemTable is a wrapper around semTable exported for testing.
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}
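
// semTableSketch is an illustrative sketch (not used by any test) that
// drives the Enqueue/Dequeue pair above in isolation. The semaphore
// address is forced to the heap via Escape so it cannot move while it
// is in use as a key.
func semTableSketch() bool {
	sema := Escape(new(uint32))
	t := &SemTable{}
	t.Enqueue(sema)
	return t.Dequeue(sema) // expected true: the waiter we just enqueued
}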

// mspan wrapper for testing.
type MSpan mspan

// Allocate an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// Free an allocated mspan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uintptr(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

// Count returns the count for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the count
// for the underflow bucket if bucket < 0, or the count for the
// overflow bucket if the indices are out of range, and false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}
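
// timeHistogramSketch is an illustrative sketch (not used by any
// test): it records a single tiny duration and reads it back out of
// the first bucket. Durations below the minimum bucket threshold all
// land in bucket 0; the expected result here is (1, true).
func timeHistogramSketch() (uint64, bool) {
	var th TimeHistogram
	th.Record(1) // 1ns
	return th.Count(0, 0)
}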

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

// For GCTestMoveStackOnNextCall, it's important not to introduce an
// extra layer of call, since then there's a return before the "real"
// next call.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// For GCTestIsReachable, it's important that we do this as a call so
// escape analysis can see through it.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// For GCTestPointerClass, it's important that we do this as a call so
// escape analysis can see through it.
//
// This is nosplit because gcTestPointerClass is.
//
//go:nosplit
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	g := Escape(new(GCController))
	g.gcControllerState.test = true // Mark it as a test copy.
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

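// Escape forces x to escape to the heap, defeating the compiler's
// escape analysis: the assignment to escapeSink is never executed at
// run time, but the compiler cannot prove alwaysFalse stays false.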
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

// Acquirem blocks preemption.
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}
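
// piControllerSketch is an illustrative sketch (not used by any test)
// that drives one step of the PI controller: with the input below the
// setpoint, the output rises toward max. The gains are arbitrary,
// chosen only for illustration.
func piControllerSketch() (float64, bool) {
	c := NewPIController(0.5, 1.0, 1.0, 0.0, 1.0) // kp, ti, tt, min, max
	return c.Next(0.5, 1.0, 1.0)                  // input, setpoint, period
}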

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}
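
// gcCPULimiterSketch is an illustrative sketch (not used by any test):
// it constructs a limiter for a hypothetical 4-proc configuration.
// The bucket capacity should scale linearly with the proc count.
func gcCPULimiterSketch() bool {
	l := NewGCCPULimiter(nanotime(), 4)
	return l.Capacity() == 4*CapacityPerProc
}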

const ScavengePercent = scavengePercent

type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install hooks.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start up scavenger goroutine, and wait for it to be ready.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// This should match bgscavenge, loosely.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

// BlockUntilParked blocks until the scavenger parks, or until
// timeout is exceeded. Returns true if the scavenger parked.
//
// Note that in testing, parked means something slightly different.
// In anger, the scavenger parks to sleep, too, but in testing,
// it only parks when it actually has no work to do.
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Just spin, waiting for it to park.
	//
	// The actual parking process is racy with respect to
	// wakeups, which is fine, but for testing we need something
	// a bit more robust.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns how many bytes the scavenger released.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}
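
// scavengerSketch is an illustrative sketch (not used by any test)
// that wires the Scavenger harness above with the simplest possible
// stubs: a scavenger that immediately reports no work, so it parks
// right away and can be shut down cleanly. Real tests install stubs
// that inject and count work.
func scavengerSketch() {
	s := &Scavenger{
		Sleep:      func(d int64) int64 { return d },
		Scavenge:   func(n uintptr) (uintptr, int64) { return 0, 0 }, // no memory to release
		ShouldStop: func() bool { return false },
		GoMaxProcs: func() int32 { return 1 },
	}
	s.Start() // returns once the scavenger goroutine has parked
	s.Wake()  // kick it once; it finds no work and parks again
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("scavenger did not park")
	}
	s.Stop()
}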

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	// This is a bit lazy but we easily guarantee we'll be able
	// to reference all the relevant chunks. The worst-case
	// memory usage here is 512 MiB, but tests generally use
	// small offsets from BaseChunkIdx, which results in ~100s
	// of KiB in memory use.
	//
	// This may still be worth making better, at least by sharing
	// this fairly large array across calls with a sync.Pool or
	// something. Currently, when the tests are run serially,
	// it takes around 0.5s. Not all that much, but if we have
	// a lot of tests like this it could add up.
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.free(sc, si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) bool {
	return s.i.setNoHugePage(chunkIdx(ci))
}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind_&kindMask != kindPtr {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

// BlockUntilEmptyFinalizerQueue blocks until either the finalizer
// queue is emptied (and the finalizers have executed) or the timeout
// is reached. Returns true if the finalizer queue was emptied.
func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&finlock)
		// We know the queue has been drained when both finq is nil
		// and the finalizer g has stopped executing.
		empty := finq == nil
		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
		unlock(&finlock)
		if empty {
			return true
		}
		Gosched()
	}
	return false
}
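
// drainFinalizersSketch is an illustrative sketch (not used by any
// test) of the intended pattern for the helper above: force a GC cycle
// so pending finalizers are queued, then wait for the finalizer
// goroutine to drain the queue.
func drainFinalizersSketch() bool {
	GC()
	return BlockUntilEmptyFinalizerQueue(1e9 /* 1 second */)
}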

func FrameStartLine(f *Frame) int {
	return f.startLine
}

// PersistentAlloc allocates some memory that lives outside the Go heap.
// This memory will never be freed; use sparingly.
func PersistentAlloc(n uintptr) unsafe.Pointer {
	return persistentalloc(n, 0, &memstats.other_sys)
}

// FPCallers works like Callers and uses frame pointer unwinding to populate
// pcBuf with the return addresses of the physical frames on the stack.
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getcallerfp()), pcBuf)
}

var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}