github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
    15  var Fadd64 = fadd64
    16  var Fsub64 = fsub64
    17  var Fmul64 = fmul64
    18  var Fdiv64 = fdiv64
    19  var F64to32 = f64to32
    20  var F32to64 = f32to64
    21  var Fcmp64 = fcmp64
    22  var Fintto64 = fintto64
    23  var F64toint = f64toint
    24  
    25  var Entersyscall = entersyscall
    26  var Exitsyscall = exitsyscall
    27  var LockedOSThread = lockedOSThread
    28  var Xadduintptr = atomic.Xadduintptr
    29  
    30  var FuncPC = funcPC
    31  
    32  var Fastlog2 = fastlog2
    33  
    34  var Atoi = atoi
    35  var Atoi32 = atoi32
    36  
    37  var Nanotime = nanotime
    38  var NetpollBreak = netpollBreak
    39  var Usleep = usleep
    40  
    41  var PhysPageSize = physPageSize
    42  var PhysHugePageSize = physHugePageSize
    43  
    44  var NetpollGenericInit = netpollGenericInit
    45  
    46  var Memmove = memmove
    47  var MemclrNoHeapPointers = memclrNoHeapPointers
    48  
    49  const PreemptMSupported = preemptMSupported
    50  
    51  type LFNode struct {
    52  	Next    uint64
    53  	Pushcnt uintptr
    54  }
    55  
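        // LFStackPush pushes node onto the lock-free stack whose head word is
        // *head. It lets tests exercise lfstack without exporting the internal
        // types.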
    56  func LFStackPush(head *uint64, node *LFNode) {
    57  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    58  }
    59  
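        // LFStackPop pops a node from the lock-free stack, or returns nil if
        // the stack is empty.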
    60  func LFStackPop(head *uint64) *LFNode {
    61  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
    62  }
    63  
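        // Netpoll runs one round of the network poller on the system stack.
        // delta is interpreted as by the internal netpoll: negative blocks
        // indefinitely, zero polls without blocking, and a positive value
        // blocks for up to delta nanoseconds.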
    64  func Netpoll(delta int64) {
    65  	systemstack(func() {
    66  		netpoll(delta)
    67  	})
    68  }
    69  
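        // GCMask returns the GC pointer bitmap for the object referenced by x,
        // computed on the system stack.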
    70  func GCMask(x interface{}) (ret []byte) {
    71  	systemstack(func() {
    72  		ret = getgcmask(x)
    73  	})
    74  	return
    75  }
    76  
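        // RunSchedLocalQueueTest exercises a P's local run queue, checking that
        // runqput and runqget agree on the elements stored and that the queue
        // is empty before and after each round.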
    77  func RunSchedLocalQueueTest() {
    78  	_p_ := new(p)
    79  	gs := make([]g, len(_p_.runq))
    80  	for i := 0; i < len(_p_.runq); i++ {
    81  		if g, _ := runqget(_p_); g != nil {
    82  			throw("runq is not empty initially")
    83  		}
    84  		for j := 0; j < i; j++ {
    85  			runqput(_p_, &gs[i], false)
    86  		}
    87  		for j := 0; j < i; j++ {
    88  			if g, _ := runqget(_p_); g != &gs[i] {
    89  				print("bad element at iter ", i, "/", j, "\n")
    90  				throw("bad element")
    91  			}
    92  		}
    93  		if g, _ := runqget(_p_); g != nil {
    94  			throw("runq is not empty afterwards")
    95  		}
    96  	}
    97  }
    98  
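        // RunSchedLocalQueueStealTest fills p1's run queue, steals into p2 with
        // runqsteal, and verifies that every goroutine is drained exactly once
        // and that roughly half of them were stolen.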
    99  func RunSchedLocalQueueStealTest() {
   100  	p1 := new(p)
   101  	p2 := new(p)
   102  	gs := make([]g, len(p1.runq))
   103  	for i := 0; i < len(p1.runq); i++ {
   104  		for j := 0; j < i; j++ {
   105  			gs[j].sig = 0
   106  			runqput(p1, &gs[j], false)
   107  		}
   108  		gp := runqsteal(p2, p1, true)
   109  		s := 0
   110  		if gp != nil {
   111  			s++
   112  			gp.sig++
   113  		}
   114  		for {
   115  			gp, _ = runqget(p2)
   116  			if gp == nil {
   117  				break
   118  			}
   119  			s++
   120  			gp.sig++
   121  		}
   122  		for {
   123  			gp, _ = runqget(p1)
   124  			if gp == nil {
   125  				break
   126  			}
   127  			gp.sig++
   128  		}
   129  		for j := 0; j < i; j++ {
   130  			if gs[j].sig != 1 {
   131  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   132  				throw("bad element")
   133  			}
   134  		}
   135  		if s != i/2 && s != i/2+1 {
   136  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   137  			throw("bad steal")
   138  		}
   139  	}
   140  }
   141  
   142  func RunSchedLocalQueueEmptyTest(iters int) {
   143  	// Test that runq is not spuriously reported as empty.
   144  	// Runq emptiness affects scheduling decisions and spurious emptiness
   145  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   146  	// for an arbitrarily long time).
   147  	done := make(chan bool, 1)
   148  	p := new(p)
   149  	gs := make([]g, 2)
   150  	ready := new(uint32)
   151  	for i := 0; i < iters; i++ {
   152  		*ready = 0
   153  		next0 := (i & 1) == 0
   154  		next1 := (i & 2) == 0
   155  		runqput(p, &gs[0], next0)
   156  		go func() {
   157  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   158  			}
   159  			if runqempty(p) {
   160  				println("next:", next0, next1)
   161  				throw("queue is empty")
   162  			}
   163  			done <- true
   164  		}()
   165  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   166  		}
   167  		runqput(p, &gs[1], next1)
   168  		runqget(p)
   169  		<-done
   170  		runqget(p)
   171  	}
   172  }
   173  
   174  var (
   175  	StringHash = stringHash
   176  	BytesHash  = bytesHash
   177  	Int32Hash  = int32Hash
   178  	Int64Hash  = int64Hash
   179  	MemHash    = memhash
   180  	MemHash32  = memhash32
   181  	MemHash64  = memhash64
   182  	EfaceHash  = efaceHash
   183  	IfaceHash  = ifaceHash
   184  )
   185  
   186  var UseAeshash = &useAeshash
   187  
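        // MemclrBytes zeroes the bytes of b in place using memclrNoHeapPointers.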
   188  func MemclrBytes(b []byte) {
   189  	s := (*slice)(unsafe.Pointer(&b))
   190  	memclrNoHeapPointers(s.array, uintptr(s.len))
   191  }
   192  
   193  var HashLoad = &hashLoad
   194  
   195  // GostringW is an entry point for testing gostringw.
   196  func GostringW(w []uint16) (s string) {
   197  	systemstack(func() {
   198  		s = gostringw(&w[0])
   199  	})
   200  	return
   201  }
   202  
   203  type Uintreg sys.Uintreg
   204  
   205  var Open = open
   206  var Close = closefd
   207  var Read = read
   208  var Write = write
   209  
   210  func Envs() []string     { return envs }
   211  func SetEnvs(e []string) { envs = e }
   212  
   213  var BigEndian = sys.BigEndian
   214  
   215  // For benchmarking.
   216  
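        // BenchSetType decodes the pointer or slice passed in x and, on the
        // system stack, writes that type's heap bitmap n times with
        // heapBitsSetType, so the benchmark can exercise the bitmap writer
        // directly.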
   217  func BenchSetType(n int, x interface{}) {
   218  	e := *efaceOf(&x)
   219  	t := e._type
   220  	var size uintptr
   221  	var p unsafe.Pointer
   222  	switch t.kind & kindMask {
   223  	case kindPtr:
   224  		t = (*ptrtype)(unsafe.Pointer(t)).elem
   225  		size = t.size
   226  		p = e.data
   227  	case kindSlice:
   228  		slice := *(*struct {
   229  			ptr      unsafe.Pointer
   230  			len, cap uintptr
   231  		})(e.data)
   232  		t = (*slicetype)(unsafe.Pointer(t)).elem
   233  		size = t.size * slice.len
   234  		p = slice.ptr
   235  	}
   236  	allocSize := roundupsize(size)
   237  	systemstack(func() {
   238  		for i := 0; i < n; i++ {
   239  			heapBitsSetType(uintptr(p), allocSize, size, t)
   240  		}
   241  	})
   242  }
   243  
   244  const PtrSize = sys.PtrSize
   245  
   246  var ForceGCPeriod = &forcegcperiod
   247  
   248  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   249  // the "environment" traceback level, so later calls to
   250  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   251  func SetTracebackEnv(level string) {
   252  	setTraceback(level)
   253  	traceback_env = traceback_cache
   254  }
   255  
   256  var ReadUnaligned32 = readUnaligned32
   257  var ReadUnaligned64 = readUnaligned64
   258  
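        // CountPagesInUse stops the world and returns both the runtime's
        // pagesInUse counter and an independent count obtained by summing the
        // pages of all in-use spans, so tests can compare the two.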
   259  func CountPagesInUse() (pagesInUse, counted uintptr) {
   260  	stopTheWorld("CountPagesInUse")
   261  
   262  	pagesInUse = uintptr(mheap_.pagesInUse)
   263  
   264  	for _, s := range mheap_.allspans {
   265  		if s.state.get() == mSpanInUse {
   266  			counted += s.npages
   267  		}
   268  	}
   269  
   270  	startTheWorld()
   271  
   272  	return
   273  }
   274  
   275  func Fastrand() uint32          { return fastrand() }
   276  func Fastrandn(n uint32) uint32 { return fastrandn(n) }
   277  
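        // ProfBuf exposes the runtime's profiling ring buffer for testing. The
        // wrappers below mirror the internal API; a typical test uses them
        // roughly like this (a sketch only, values are illustrative):
        //
        //	b := NewProfBuf(2, 1000, 100)        // hdrsize, bufwords, tags
        //	b.Write(&tag, nanotime(), hdr, stk)  // append one record
        //	data, tags, eof := b.Read(ProfBufNonBlocking)
        //	b.Close()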
   278  type ProfBuf profBuf
   279  
   280  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   281  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   282  }
   283  
   284  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   285  	(*profBuf)(p).write(tag, now, hdr, stk)
   286  }
   287  
   288  const (
   289  	ProfBufBlocking    = profBufBlocking
   290  	ProfBufNonBlocking = profBufNonBlocking
   291  )
   292  
   293  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   294  	return (*profBuf)(p).read(profBufReadMode(mode))
   295  }
   296  
   297  func (p *ProfBuf) Close() {
   298  	(*profBuf)(p).close()
   299  }
   300  
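        // ReadMetricsSlow stops the world and reads both MemStats and the
        // runtime/metrics samples pointed to by samplesp, so the two views are
        // mutually consistent for comparison in tests.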
   301  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   302  	stopTheWorld("ReadMetricsSlow")
   303  
   304  	// Initialize the metrics beforehand because this could
   305  	// allocate and skew the stats.
   306  	semacquire(&metricsSema)
   307  	initMetrics()
   308  	semrelease(&metricsSema)
   309  
   310  	systemstack(func() {
   311  		// Read memstats first. It's going to flush
   312  		// the mcaches, which readMetrics does not do, so
   313  		// reading them in the other order may result in
   314  		// inconsistent statistics.
   315  		readmemstats_m(memStats)
   316  	})
   317  
   318  	// Read metrics off the system stack.
   319  	//
   320  	// The only part of readMetrics that could allocate
   321  	// and skew the stats is initMetrics.
   322  	readMetrics(samplesp, len, cap)
   323  
   324  	startTheWorld()
   325  }
   326  
   327  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   328  // MemStats accumulated by scanning the heap.
   329  func ReadMemStatsSlow() (base, slow MemStats) {
   330  	stopTheWorld("ReadMemStatsSlow")
   331  
   332  	// Run on the system stack to avoid stack growth allocation.
   333  	systemstack(func() {
   334  		// Make sure stats don't change.
   335  		getg().m.mallocing++
   336  
   337  		readmemstats_m(&base)
   338  
   339  		// Initialize slow from base and zero the fields we're
   340  		// recomputing.
   341  		slow = base
   342  		slow.Alloc = 0
   343  		slow.TotalAlloc = 0
   344  		slow.Mallocs = 0
   345  		slow.Frees = 0
   346  		slow.HeapReleased = 0
   347  		var bySize [_NumSizeClasses]struct {
   348  			Mallocs, Frees uint64
   349  		}
   350  
   351  		// Add up current allocations in spans.
   352  		for _, s := range mheap_.allspans {
   353  			if s.state.get() != mSpanInUse {
   354  				continue
   355  			}
   356  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   357  				slow.Mallocs++
   358  				slow.Alloc += uint64(s.elemsize)
   359  			} else {
   360  				slow.Mallocs += uint64(s.allocCount)
   361  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   362  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   363  			}
   364  		}
   365  
   366  		// Add in frees by just reading the stats for those directly.
   367  		var m heapStatsDelta
   368  		memstats.heapStats.unsafeRead(&m)
   369  
   370  		// Collect per-sizeclass free stats.
   371  		var smallFree uint64
   372  		for i := 0; i < _NumSizeClasses; i++ {
   373  			slow.Frees += uint64(m.smallFreeCount[i])
   374  			bySize[i].Frees += uint64(m.smallFreeCount[i])
   375  			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
   376  			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
   377  		}
   378  		slow.Frees += memstats.tinyallocs + uint64(m.largeFreeCount)
   379  		slow.Mallocs += slow.Frees
   380  
   381  		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
   382  
   383  		for i := range slow.BySize {
   384  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   385  			slow.BySize[i].Frees = bySize[i].Frees
   386  		}
   387  
   388  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   389  			chunk := mheap_.pages.tryChunkOf(i)
   390  			if chunk == nil {
   391  				continue
   392  			}
   393  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   394  			slow.HeapReleased += uint64(pg) * pageSize
   395  		}
   396  		for _, p := range allp {
   397  			pg := sys.OnesCount64(p.pcache.scav)
   398  			slow.HeapReleased += uint64(pg) * pageSize
   399  		}
   400  
   401  		// Unused space in the current arena also counts as released space.
   402  		slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
   403  
   404  		getg().m.mallocing--
   405  	})
   406  
   407  	startTheWorld()
   408  	return
   409  }
   410  
   411  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   412  // stderr, and blocks in a stack containing
   413  // "runtime.blockOnSystemStackInternal".
   414  func BlockOnSystemStack() {
   415  	systemstack(blockOnSystemStackInternal)
   416  }
   417  
   418  func blockOnSystemStackInternal() {
   419  	print("x\n")
   420  	lock(&deadlock)
   421  	lock(&deadlock)
   422  }
   423  
   424  type RWMutex struct {
   425  	rw rwmutex
   426  }
   427  
   428  func (rw *RWMutex) RLock() {
   429  	rw.rw.rlock()
   430  }
   431  
   432  func (rw *RWMutex) RUnlock() {
   433  	rw.rw.runlock()
   434  }
   435  
   436  func (rw *RWMutex) Lock() {
   437  	rw.rw.lock()
   438  }
   439  
   440  func (rw *RWMutex) Unlock() {
   441  	rw.rw.unlock()
   442  }
   443  
   444  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   445  
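        // MapBucketsCount returns the number of buckets (2**B) currently
        // allocated for m.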
   446  func MapBucketsCount(m map[int]int) int {
   447  	h := *(**hmap)(unsafe.Pointer(&m))
   448  	return 1 << h.B
   449  }
   450  
   451  func MapBucketsPointerIsNil(m map[int]int) bool {
   452  	h := *(**hmap)(unsafe.Pointer(&m))
   453  	return h.buckets == nil
   454  }
   455  
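        // LockOSCounts reports the current goroutine's external and internal
        // LockOSThread counts, panicking if they are inconsistent with
        // lockedm.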
   456  func LockOSCounts() (external, internal uint32) {
   457  	g := getg()
   458  	if g.m.lockedExt+g.m.lockedInt == 0 {
   459  		if g.lockedm != 0 {
   460  			panic("lockedm on non-locked goroutine")
   461  		}
   462  	} else {
   463  		if g.lockedm == 0 {
   464  			panic("nil lockedm on locked goroutine")
   465  		}
   466  	}
   467  	return g.m.lockedExt, g.m.lockedInt
   468  }
   469  
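        // TracebackSystemstack switches to the system stack i times and then
        // records a traceback into stk with the _TraceJumpStack flag,
        // returning the number of frames captured.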
   470  //go:noinline
   471  func TracebackSystemstack(stk []uintptr, i int) int {
   472  	if i == 0 {
   473  		pc, sp := getcallerpc(), getcallersp()
   474  		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
   475  	}
   476  	n := 0
   477  	systemstack(func() {
   478  		n = TracebackSystemstack(stk, i-1)
   479  	})
   480  	return n
   481  }
   482  
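        // KeepNArenaHints truncates the heap's arena hint list to at most n
        // entries.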
   483  func KeepNArenaHints(n int) {
   484  	hint := mheap_.arenaHints
   485  	for i := 1; i < n; i++ {
   486  		hint = hint.next
   487  		if hint == nil {
   488  			return
   489  		}
   490  	}
   491  	hint.next = nil
   492  }
   493  
   494  // MapNextArenaHint reserves a page at the next arena growth hint,
   495  // preventing the arena from growing there, and returns the range of
   496  // addresses that are no longer viable.
   497  func MapNextArenaHint() (start, end uintptr) {
   498  	hint := mheap_.arenaHints
   499  	addr := hint.addr
   500  	if hint.down {
   501  		start, end = addr-heapArenaBytes, addr
   502  		addr -= physPageSize
   503  	} else {
   504  		start, end = addr, addr+heapArenaBytes
   505  	}
   506  	sysReserve(unsafe.Pointer(addr), physPageSize)
   507  	return
   508  }
   509  
   510  func GetNextArenaHint() uintptr {
   511  	return mheap_.arenaHints.addr
   512  }
   513  
   514  type G = g
   515  
   516  type Sudog = sudog
   517  
   518  func Getg() *G {
   519  	return getg()
   520  }
   521  
   522  //go:noinline
   523  func PanicForTesting(b []byte, i int) byte {
   524  	return unexportedPanicForTesting(b, i)
   525  }
   526  
   527  //go:noinline
   528  func unexportedPanicForTesting(b []byte, i int) byte {
   529  	return b[i]
   530  }
   531  
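        // G0StackOverflow overflows the g0 (system) stack by recursing without
        // bound, to exercise the fatal stack overflow path.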
   532  func G0StackOverflow() {
   533  	systemstack(func() {
   534  		stackOverflow(nil)
   535  	})
   536  }
   537  
   538  func stackOverflow(x *byte) {
   539  	var buf [256]byte
   540  	stackOverflow(&buf[0])
   541  }
   542  
   543  func MapTombstoneCheck(m map[int]int) {
   544  	// Make sure emptyOne and emptyRest are distributed correctly.
   545  	// We should have a series of filled and emptyOne cells, followed by
   546  	// a series of emptyRest cells.
   547  	h := *(**hmap)(unsafe.Pointer(&m))
   548  	i := interface{}(m)
   549  	t := *(**maptype)(unsafe.Pointer(&i))
   550  
   551  	for x := 0; x < 1<<h.B; x++ {
   552  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
   553  		n := 0
   554  		for b := b0; b != nil; b = b.overflow(t) {
   555  			for i := 0; i < bucketCnt; i++ {
   556  				if b.tophash[i] != emptyRest {
   557  					n++
   558  				}
   559  			}
   560  		}
   561  		k := 0
   562  		for b := b0; b != nil; b = b.overflow(t) {
   563  			for i := 0; i < bucketCnt; i++ {
   564  				if k < n && b.tophash[i] == emptyRest {
   565  					panic("early emptyRest")
   566  				}
   567  				if k >= n && b.tophash[i] != emptyRest {
   568  					panic("late non-emptyRest")
   569  				}
   570  				if k == n-1 && b.tophash[i] == emptyOne {
   571  					panic("last non-emptyRest entry is emptyOne")
   572  				}
   573  				k++
   574  			}
   575  		}
   576  	}
   577  }
   578  
   579  func RunGetgThreadSwitchTest() {
   580  	// Test that getg works correctly with thread switch.
   581  	// With gccgo, if we generate getg inlined, the backend
   582  	// may cache the address of the TLS variable, which
   583  	// will become invalid after a thread switch. This test
   584  	// checks that the bad caching doesn't happen.
   585  
   586  	ch := make(chan int)
   587  	go func(ch chan int) {
   588  		ch <- 5
   589  		LockOSThread()
   590  	}(ch)
   591  
   592  	g1 := getg()
   593  
   594  	// Block on a receive. This is likely to get us a thread
   595  	// switch. If we yield to the sender goroutine, it will
   596  	// lock the thread, forcing us to resume on a different
   597  	// thread.
   598  	<-ch
   599  
   600  	g2 := getg()
   601  	if g1 != g2 {
   602  		panic("g1 != g2")
   603  	}
   604  
   605  	// Also test getg after some control flow, as the
   606  	// backend is sensitive to control flow.
   607  	g3 := getg()
   608  	if g1 != g3 {
   609  		panic("g1 != g3")
   610  	}
   611  }
   612  
   613  const (
   614  	PageSize         = pageSize
   615  	PallocChunkPages = pallocChunkPages
   616  	PageAlloc64Bit   = pageAlloc64Bit
   617  	PallocSumBytes   = pallocSumBytes
   618  )
   619  
   620  // Expose pallocSum for testing.
   621  type PallocSum pallocSum
   622  
   623  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   624  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   625  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   626  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   627  
   628  // Expose pallocBits for testing.
   629  type PallocBits pallocBits
   630  
   631  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   632  	return (*pallocBits)(b).find(npages, searchIdx)
   633  }
   634  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   635  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   636  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   637  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   638  
   639  // SummarizeSlow is a slow but more obviously correct implementation
   640  // of (*pallocBits).summarize. Used for testing.
   641  func SummarizeSlow(b *PallocBits) PallocSum {
   642  	var start, max, end uint
   643  
   644  	const N = uint(len(b)) * 64
   645  	for start < N && (*pageBits)(b).get(start) == 0 {
   646  		start++
   647  	}
   648  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   649  		end++
   650  	}
   651  	run := uint(0)
   652  	for i := uint(0); i < N; i++ {
   653  		if (*pageBits)(b).get(i) == 0 {
   654  			run++
   655  		} else {
   656  			run = 0
   657  		}
   658  		if run > max {
   659  			max = run
   660  		}
   661  	}
   662  	return PackPallocSum(start, max, end)
   663  }
   664  
   665  // Expose non-trivial helpers for testing.
   666  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   667  
   668  // DiffPallocBits returns the set of bit ranges in which the two
   669  // PallocBits differ.
   670  func DiffPallocBits(a, b *PallocBits) []BitRange {
   671  	ba := (*pageBits)(a)
   672  	bb := (*pageBits)(b)
   673  
   674  	var d []BitRange
   675  	base, size := uint(0), uint(0)
   676  	for i := uint(0); i < uint(len(ba))*64; i++ {
   677  		if ba.get(i) != bb.get(i) {
   678  			if size == 0 {
   679  				base = i
   680  			}
   681  			size++
   682  		} else {
   683  			if size != 0 {
   684  				d = append(d, BitRange{base, size})
   685  			}
   686  			size = 0
   687  		}
   688  	}
   689  	if size != 0 {
   690  		d = append(d, BitRange{base, size})
   691  	}
   692  	return d
   693  }
   694  
   695  // StringifyPallocBits gets the bits in the bit range r from b,
   696  // and returns a string containing the bits as ASCII 0 and 1
   697  // characters.
   698  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   699  	str := ""
   700  	for j := r.I; j < r.I+r.N; j++ {
   701  		if (*pageBits)(b).get(j) != 0 {
   702  			str += "1"
   703  		} else {
   704  			str += "0"
   705  		}
   706  	}
   707  	return str
   708  }
   709  
   710  // Expose pallocData for testing.
   711  type PallocData pallocData
   712  
   713  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   714  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   715  }
   716  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   717  func (d *PallocData) ScavengedSetRange(i, n uint) {
   718  	(*pallocData)(d).scavenged.setRange(i, n)
   719  }
   720  func (d *PallocData) PallocBits() *PallocBits {
   721  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   722  }
   723  func (d *PallocData) Scavenged() *PallocBits {
   724  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   725  }
   726  
   727  // Expose fillAligned for testing.
   728  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   729  
   730  // Expose pageCache for testing.
   731  type PageCache pageCache
   732  
   733  const PageCachePages = pageCachePages
   734  
   735  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   736  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   737  }
   738  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   739  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   740  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   741  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   742  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   743  	return (*pageCache)(c).alloc(npages)
   744  }
   745  func (c *PageCache) Flush(s *PageAlloc) {
   746  	cp := (*pageCache)(c)
   747  	sp := (*pageAlloc)(s)
   748  
   749  	systemstack(func() {
   750  		// None of the tests need any higher-level locking, so we just
   751  		// take the lock internally.
   752  		lock(sp.mheapLock)
   753  		cp.flush(sp)
   754  		unlock(sp.mheapLock)
   755  	})
   756  }
   757  
   758  // Expose chunk index type.
   759  type ChunkIdx chunkIdx
   760  
   761  // Expose pageAlloc for testing. Note that because pageAlloc is
   762  // not in the heap, neither is PageAlloc.
   763  type PageAlloc pageAlloc
   764  
   765  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   766  	pp := (*pageAlloc)(p)
   767  
   768  	var addr, scav uintptr
   769  	systemstack(func() {
   770  		// None of the tests need any higher-level locking, so we just
   771  		// take the lock internally.
   772  		lock(pp.mheapLock)
   773  		addr, scav = pp.alloc(npages)
   774  		unlock(pp.mheapLock)
   775  	})
   776  	return addr, scav
   777  }
   778  func (p *PageAlloc) AllocToCache() PageCache {
   779  	pp := (*pageAlloc)(p)
   780  
   781  	var c PageCache
   782  	systemstack(func() {
   783  		// None of the tests need any higher-level locking, so we just
   784  		// take the lock internally.
   785  		lock(pp.mheapLock)
   786  		c = PageCache(pp.allocToCache())
   787  		unlock(pp.mheapLock)
   788  	})
   789  	return c
   790  }
   791  func (p *PageAlloc) Free(base, npages uintptr) {
   792  	pp := (*pageAlloc)(p)
   793  
   794  	systemstack(func() {
   795  		// None of the tests need any higher-level locking, so we just
   796  		// take the lock internally.
   797  		lock(pp.mheapLock)
   798  		pp.free(base, npages)
   799  		unlock(pp.mheapLock)
   800  	})
   801  }
   802  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   803  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   804  }
   805  func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
   806  	pp := (*pageAlloc)(p)
   807  	systemstack(func() {
   808  		// None of the tests need any higher-level locking, so we just
   809  		// take the lock internally.
   810  		lock(pp.mheapLock)
   811  		r = pp.scavenge(nbytes, mayUnlock)
   812  		unlock(pp.mheapLock)
   813  	})
   814  	return
   815  }
   816  func (p *PageAlloc) InUse() []AddrRange {
   817  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   818  	for _, r := range p.inUse.ranges {
   819  		ranges = append(ranges, AddrRange{r})
   820  	}
   821  	return ranges
   822  }
   823  
   824  // PallocData returns the chunk's PallocData, or nil if the chunk's L2 is missing.
   825  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   826  	ci := chunkIdx(i)
   827  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   828  }
   829  
   830  // AddrRange is a wrapper around addrRange for testing.
   831  type AddrRange struct {
   832  	addrRange
   833  }
   834  
   835  // MakeAddrRange creates a new address range.
   836  func MakeAddrRange(base, limit uintptr) AddrRange {
   837  	return AddrRange{makeAddrRange(base, limit)}
   838  }
   839  
   840  // Base returns the virtual base address of the address range.
   841  func (a AddrRange) Base() uintptr {
   842  	return a.addrRange.base.addr()
   843  }
   844  
   845  // Limit returns the virtual address of the limit of the address range.
   846  func (a AddrRange) Limit() uintptr {
   847  	return a.addrRange.limit.addr()
   848  }
   849  
   850  // Equals returns true if the two address ranges are exactly equal.
   851  func (a AddrRange) Equals(b AddrRange) bool {
   852  	return a == b
   853  }
   854  
   855  // Size returns the size in bytes of the address range.
   856  func (a AddrRange) Size() uintptr {
   857  	return a.addrRange.size()
   858  }
   859  
   860  // AddrRanges is a wrapper around addrRanges for testing.
   861  type AddrRanges struct {
   862  	addrRanges
   863  	mutable bool
   864  }
   865  
   866  // NewAddrRanges creates a new empty addrRanges.
   867  //
   868  // Note that this initializes addrRanges just like in the
   869  // runtime, so its memory is persistentalloc'd. Call this
   870  // function sparingly since the memory it allocates is
   871  // leaked.
   872  //
   873  // This AddrRanges is mutable, so we can test methods like
   874  // Add.
   875  func NewAddrRanges() AddrRanges {
   876  	r := addrRanges{}
   877  	r.init(new(sysMemStat))
   878  	return AddrRanges{r, true}
   879  }
   880  
   881  // MakeAddrRanges creates a new addrRanges populated with
   882  // the ranges in a.
   883  //
   884  // The returned AddrRanges is immutable, so methods like
   885  // Add will fail.
   886  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   887  	// Methods that manipulate the backing store of addrRanges.ranges should
   888  	// not be used on the result from this function (e.g. add) since they may
   889  	// trigger reallocation. That would normally be fine, except the new
   890  	// backing store won't come from the heap, but from persistentalloc, so
   891  	// we'll leak some memory implicitly.
   892  	ranges := make([]addrRange, 0, len(a))
   893  	total := uintptr(0)
   894  	for _, r := range a {
   895  		ranges = append(ranges, r.addrRange)
   896  		total += r.Size()
   897  	}
   898  	return AddrRanges{addrRanges{
   899  		ranges:     ranges,
   900  		totalBytes: total,
   901  		sysStat:    new(sysMemStat),
   902  	}, false}
   903  }
   904  
   905  // Ranges returns a copy of the ranges described by the
   906  // addrRanges.
   907  func (a *AddrRanges) Ranges() []AddrRange {
   908  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   909  	for _, r := range a.addrRanges.ranges {
   910  		result = append(result, AddrRange{r})
   911  	}
   912  	return result
   913  }
   914  
   915  // FindSucc returns the successor to base. See addrRanges.findSucc
   916  // for more details.
   917  func (a *AddrRanges) FindSucc(base uintptr) int {
   918  	return a.findSucc(base)
   919  }
   920  
   921  // Add adds a new AddrRange to the AddrRanges.
   922  //
   923  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
   924  // otherwise this method will throw.
   925  func (a *AddrRanges) Add(r AddrRange) {
   926  	if !a.mutable {
   927  		throw("attempt to mutate immutable AddrRanges")
   928  	}
   929  	a.add(r.addrRange)
   930  }
   931  
   932  // TotalBytes returns the totalBytes field of the addrRanges.
   933  func (a *AddrRanges) TotalBytes() uintptr {
   934  	return a.addrRanges.totalBytes
   935  }
   936  
   937  // BitRange represents a range over a bitmap.
   938  type BitRange struct {
   939  	I, N uint // bit index and length in bits
   940  }
   941  
   942  // NewPageAlloc creates a new page allocator for testing and
   943  // initializes it with the scav and chunks maps. Each key in these maps
   944  // represents a chunk index and each value is a series of bit ranges to
   945  // set within each bitmap's chunk.
   946  //
   947  // The initialization of the pageAlloc preserves the invariant that if a
   948  // scavenged bit is set the alloc bit is necessarily unset, so some
   949  // of the bits described by scav may be cleared in the final bitmap if
   950  // ranges in chunks overlap with them.
   951  //
   952  // scav is optional, and if nil, the scavenged bitmap will be cleared
   953  // (as opposed to all 1s, which it usually is). Furthermore, every
   954  // chunk index in scav should also appear in chunks; ones that do
   955  // not are ignored.
   956  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
   957  	p := new(pageAlloc)
   958  
   959  	// Initialize the pageAlloc.
   960  	p.init(new(mutex), nil)
   961  	lockInit(p.mheapLock, lockRankMheap)
   962  	p.test = true
   963  
   964  	for i, init := range chunks {
   965  		addr := chunkBase(chunkIdx(i))
   966  
   967  		// Mark the chunk's existence in the pageAlloc.
   968  		systemstack(func() {
   969  			lock(p.mheapLock)
   970  			p.grow(addr, pallocChunkBytes)
   971  			unlock(p.mheapLock)
   972  		})
   973  
   974  		// Initialize the bitmap and update pageAlloc metadata.
   975  		chunk := p.chunkOf(chunkIndex(addr))
   976  
   977  		// Clear all the scavenged bits, which grow sets.
   978  		chunk.scavenged.clearRange(0, pallocChunkPages)
   979  
   980  		// Apply scavenge state if applicable.
   981  		if scav != nil {
   982  			if scvg, ok := scav[i]; ok {
   983  				for _, s := range scvg {
   984  					// Ignore the case of s.N == 0. setRange doesn't handle
   985  					// it and it's a no-op anyway.
   986  					if s.N != 0 {
   987  						chunk.scavenged.setRange(s.I, s.N)
   988  					}
   989  				}
   990  			}
   991  		}
   992  
   993  		// Apply alloc state.
   994  		for _, s := range init {
   995  			// Ignore the case of s.N == 0. allocRange doesn't handle
   996  			// it and it's a no-op anyway.
   997  			if s.N != 0 {
   998  				chunk.allocRange(s.I, s.N)
   999  			}
  1000  		}
  1001  
  1002  		// Update heap metadata for the allocRange calls above.
  1003  		systemstack(func() {
  1004  			lock(p.mheapLock)
  1005  			p.update(addr, pallocChunkPages, false, false)
  1006  			unlock(p.mheapLock)
  1007  		})
  1008  	}
  1009  
  1010  	systemstack(func() {
  1011  		lock(p.mheapLock)
  1012  		p.scavengeStartGen()
  1013  		unlock(p.mheapLock)
  1014  	})
  1015  
  1016  	return (*PageAlloc)(p)
  1017  }
  1018  
  1019  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1020  // is called the pageAlloc may no longer be used. The object itself will be
  1021  // collected by the garbage collector once it is no longer live.
  1022  func FreePageAlloc(pp *PageAlloc) {
  1023  	p := (*pageAlloc)(pp)
  1024  
  1025  	// Free all the mapped space for the summary levels.
  1026  	if pageAlloc64Bit != 0 {
  1027  		for l := 0; l < summaryLevels; l++ {
  1028  			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
  1029  		}
  1030  	} else {
  1031  		resSize := uintptr(0)
  1032  		for _, s := range p.summary {
  1033  			resSize += uintptr(cap(s)) * pallocSumBytes
  1034  		}
  1035  		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
  1036  	}
  1037  
  1038  	// Free the mapped space for chunks.
  1039  	for i := range p.chunks {
  1040  		if x := p.chunks[i]; x != nil {
  1041  			p.chunks[i] = nil
  1042  			// This memory comes from sysAlloc and will always be page-aligned.
  1043  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
  1044  		}
  1045  	}
  1046  }
  1047  
  1048  // BaseChunkIdx is a convenient chunkIdx value which works on both
  1049  // 64-bit and 32-bit platforms, allowing the tests to share code
  1050  // between the two.
  1051  //
  1052  // This should not be higher than 0x100*pallocChunkBytes to support
  1053  // mips and mipsle, which only have 31-bit address spaces.
  1054  var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix))
  1055  
  1056  // PageBase returns an address given a chunk index and a page index
  1057  // relative to that chunk.
  1058  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1059  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1060  }
  1061  
  1062  type BitsMismatch struct {
  1063  	Base      uintptr
  1064  	Got, Want uint64
  1065  }
  1066  
  1067  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1068  	ok = true
  1069  
  1070  	// Run on the system stack to avoid stack growth allocation.
  1071  	systemstack(func() {
  1072  		getg().m.mallocing++
  1073  
  1074  		// Lock so that we can safely access the bitmap.
  1075  		lock(&mheap_.lock)
  1076  	chunkLoop:
  1077  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1078  			chunk := mheap_.pages.tryChunkOf(i)
  1079  			if chunk == nil {
  1080  				continue
  1081  			}
  1082  			for j := 0; j < pallocChunkPages/64; j++ {
  1083  				// Run over each 64-bit bitmap section and ensure
  1084  				// scavenged is being cleared properly on allocation.
  1085  				// If a used bit and scavenged bit are both set, that's
  1086  				// an error, and could indicate a larger problem, or
  1087  				// an accounting problem.
  1088  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1089  				got := chunk.scavenged[j]
  1090  				if want != got {
  1091  					ok = false
  1092  					if n >= len(mismatches) {
  1093  						break chunkLoop
  1094  					}
  1095  					mismatches[n] = BitsMismatch{
  1096  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
  1097  						Got:  got,
  1098  						Want: want,
  1099  					}
  1100  					n++
  1101  				}
  1102  			}
  1103  		}
  1104  		unlock(&mheap_.lock)
  1105  
  1106  		getg().m.mallocing--
  1107  	})
  1108  	return
  1109  }
  1110  
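        // PageCachePagesLeaked stops the world and counts pages still held in
        // the page caches of destroyed Ps; a nonzero result indicates a leak.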
  1111  func PageCachePagesLeaked() (leaked uintptr) {
  1112  	stopTheWorld("PageCachePagesLeaked")
  1113  
  1114  	// Walk over destroyed Ps and look for unflushed caches.
  1115  	deadp := allp[len(allp):cap(allp)]
  1116  	for _, p := range deadp {
  1117  		// Since we're going past len(allp) we may see nil Ps.
  1118  		// Just ignore them.
  1119  		if p != nil {
  1120  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1121  		}
  1122  	}
  1123  
  1124  	startTheWorld()
  1125  	return
  1126  }
  1127  
  1128  var Semacquire = semacquire
  1129  var Semrelease1 = semrelease1
  1130  
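        // SemNwait returns the number of waiters recorded on the semaphore
        // root for addr.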
  1131  func SemNwait(addr *uint32) uint32 {
  1132  	root := semroot(addr)
  1133  	return atomic.Load(&root.nwait)
  1134  }
  1135  
  1136  // MapHashCheck computes the hash of the key k for the map m, twice.
  1137  // Method 1 uses the built-in hasher for the map.
  1138  // Method 2 uses the typehash function (the one used by reflect).
  1139  // Returns the two hash values, which should always be equal.
  1140  func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
  1141  	// Unpack m.
  1142  	mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
  1143  	mh := (*hmap)(efaceOf(&m).data)
  1144  
  1145  	// Unpack k.
  1146  	kt := efaceOf(&k)._type
  1147  	var p unsafe.Pointer
  1148  	if isDirectIface(kt) {
  1149  		q := efaceOf(&k).data
  1150  		p = unsafe.Pointer(&q)
  1151  	} else {
  1152  		p = efaceOf(&k).data
  1153  	}
  1154  
  1155  	// Compute the hash functions.
  1156  	x := mt.hasher(noescape(p), uintptr(mh.hash0))
  1157  	y := typehash(kt, noescape(p), uintptr(mh.hash0))
  1158  	return x, y
  1159  }
  1160  
  1161  // MSpan is an mspan wrapper for testing.
  1162  //go:notinheap
  1163  type MSpan mspan
  1164  
  1165  // AllocMSpan allocates an mspan for testing.
  1166  func AllocMSpan() *MSpan {
  1167  	var s *mspan
  1168  	systemstack(func() {
  1169  		lock(&mheap_.lock)
  1170  		s = (*mspan)(mheap_.spanalloc.alloc())
  1171  		unlock(&mheap_.lock)
  1172  	})
  1173  	return (*MSpan)(s)
  1174  }
  1175  
  1176  // FreeMSpan frees an mspan allocated with AllocMSpan.
  1177  func FreeMSpan(s *MSpan) {
  1178  	systemstack(func() {
  1179  		lock(&mheap_.lock)
  1180  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1181  		unlock(&mheap_.lock)
  1182  	})
  1183  }
  1184  
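        // MSpanCountAlloc points the span's gcmarkBits at bits, counts the set
        // (allocated) bits with countAlloc, and resets gcmarkBits to nil before
        // returning the count.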
  1185  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1186  	s := (*mspan)(ms)
  1187  	s.nelems = uintptr(len(bits) * 8)
  1188  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1189  	result := s.countAlloc()
  1190  	s.gcmarkBits = nil
  1191  	return result
  1192  }
  1193  
  1194  const (
  1195  	TimeHistSubBucketBits   = timeHistSubBucketBits
  1196  	TimeHistNumSubBuckets   = timeHistNumSubBuckets
  1197  	TimeHistNumSuperBuckets = timeHistNumSuperBuckets
  1198  )
  1199  
  1200  type TimeHistogram timeHistogram
  1201  
  1202  // Count returns the count for the given bucket and subBucket indices.
  1203  // It returns true if the bucket was valid, otherwise it returns the
  1204  // count for the underflow bucket and false.
  1205  func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
  1206  	t := (*timeHistogram)(th)
  1207  	i := bucket*TimeHistNumSubBuckets + subBucket
  1208  	if i >= uint(len(t.counts)) {
  1209  		return t.underflow, false
  1210  	}
  1211  	return t.counts[i], true
  1212  }
  1213  
  1214  func (th *TimeHistogram) Record(duration int64) {
  1215  	(*timeHistogram)(th).record(duration)
  1216  }