github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"github.com/x04/go/src/runtime/internal/atomic"
    11  	"github.com/x04/go/src/runtime/internal/sys"
    12  	"github.com/x04/go/src/unsafe"
    13  )
    14  
    15  var Fadd64 = fadd64
    16  var Fsub64 = fsub64
    17  var Fmul64 = fmul64
    18  var Fdiv64 = fdiv64
    19  var F64to32 = f64to32
    20  var F32to64 = f32to64
    21  var Fcmp64 = fcmp64
    22  var Fintto64 = fintto64
    23  var F64toint = f64toint
    24  
    25  var Entersyscall = entersyscall
    26  var Exitsyscall = exitsyscall
    27  var LockedOSThread = lockedOSThread
    28  var Xadduintptr = atomic.Xadduintptr
    29  
    30  var FuncPC = funcPC
    31  
    32  var Fastlog2 = fastlog2
    33  
    34  var Atoi = atoi
    35  var Atoi32 = atoi32
    36  
    37  var Nanotime = nanotime
    38  var NetpollBreak = netpollBreak
    39  var Usleep = usleep
    40  
    41  var PhysPageSize = physPageSize
    42  var PhysHugePageSize = physHugePageSize
    43  
    44  var NetpollGenericInit = netpollGenericInit
    45  
    46  var ParseRelease = parseRelease
    47  
    48  var Memmove = memmove
    49  var MemclrNoHeapPointers = memclrNoHeapPointers
    50  
    51  const PreemptMSupported = preemptMSupported
    52  
    53  type LFNode struct {
    54  	Next	uint64
    55  	Pushcnt	uintptr
    56  }
    57  
    58  func LFStackPush(head *uint64, node *LFNode) {
    59  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    60  }
    61  
    62  func LFStackPop(head *uint64) *LFNode {
    63  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
    64  }
    65  
    66  func Netpoll(delta int64) {
    67  	systemstack(func() {
    68  		netpoll(delta)
    69  	})
    70  }
    71  
    72  func GCMask(x interface{}) (ret []byte) {
    73  	systemstack(func() {
    74  		ret = getgcmask(x)
    75  	})
    76  	return
    77  }
    78  
    79  func RunSchedLocalQueueTest() {
    80  	_p_ := new(p)
    81  	gs := make([]g, len(_p_.runq))
    82  	for i := 0; i < len(_p_.runq); i++ {
    83  		if g, _ := runqget(_p_); g != nil {
    84  			throw("runq is not empty initially")
    85  		}
    86  		for j := 0; j < i; j++ {
    87  			runqput(_p_, &gs[i], false)
    88  		}
    89  		for j := 0; j < i; j++ {
    90  			if g, _ := runqget(_p_); g != &gs[i] {
    91  				print("bad element at iter ", i, "/", j, "\n")
    92  				throw("bad element")
    93  			}
    94  		}
    95  		if g, _ := runqget(_p_); g != nil {
    96  			throw("runq is not empty afterwards")
    97  		}
    98  	}
    99  }
   100  
   101  func RunSchedLocalQueueStealTest() {
   102  	p1 := new(p)
   103  	p2 := new(p)
   104  	gs := make([]g, len(p1.runq))
   105  	for i := 0; i < len(p1.runq); i++ {
   106  		for j := 0; j < i; j++ {
   107  			gs[j].sig = 0
   108  			runqput(p1, &gs[j], false)
   109  		}
   110  		gp := runqsteal(p2, p1, true)
   111  		s := 0
   112  		if gp != nil {
   113  			s++
   114  			gp.sig++
   115  		}
   116  		for {
   117  			gp, _ = runqget(p2)
   118  			if gp == nil {
   119  				break
   120  			}
   121  			s++
   122  			gp.sig++
   123  		}
   124  		for {
   125  			gp, _ = runqget(p1)
   126  			if gp == nil {
   127  				break
   128  			}
   129  			gp.sig++
   130  		}
   131  		for j := 0; j < i; j++ {
   132  			if gs[j].sig != 1 {
   133  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   134  				throw("bad element")
   135  			}
   136  		}
   137  		if s != i/2 && s != i/2+1 {
   138  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   139  			throw("bad steal")
   140  		}
   141  	}
   142  }
   143  
   144  func RunSchedLocalQueueEmptyTest(iters int) {
   145  	// Test that runq is not spuriously reported as empty.
   146  	// Runq emptiness affects scheduling decisions and spurious emptiness
   147  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   148  	// for an arbitrarily long time).
   149  	done := make(chan bool, 1)
   150  	p := new(p)
   151  	gs := make([]g, 2)
   152  	ready := new(uint32)
   153  	for i := 0; i < iters; i++ {
   154  		*ready = 0
   155  		next0 := (i & 1) == 0
   156  		next1 := (i & 2) == 0
   157  		runqput(p, &gs[0], next0)
   158  		go func() {
   159  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   160  			}
   161  			if runqempty(p) {
   162  				println("next:", next0, next1)
   163  				throw("queue is empty")
   164  			}
   165  			done <- true
   166  		}()
   167  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   168  		}
   169  		runqput(p, &gs[1], next1)
   170  		runqget(p)
   171  		<-done
   172  		runqget(p)
   173  	}
   174  }
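
// Illustrative sketch (not part of the upstream file): the RunSchedLocalQueue*
// entry points above are meant to be driven from the external runtime_test
// package. The test name below is an assumption; the real tests may differ.
//
//	func TestSchedLocalQueues(t *testing.T) {
//		runtime.RunSchedLocalQueueTest()
//		runtime.RunSchedLocalQueueStealTest()
//		runtime.RunSchedLocalQueueEmptyTest(1000)
//	}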
   175  
   176  var (
   177  	StringHash	= stringHash
   178  	BytesHash	= bytesHash
   179  	Int32Hash	= int32Hash
   180  	Int64Hash	= int64Hash
   181  	MemHash		= memhash
   182  	MemHash32	= memhash32
   183  	MemHash64	= memhash64
   184  	EfaceHash	= efaceHash
   185  	IfaceHash	= ifaceHash
   186  )
   187  
   188  var UseAeshash = &useAeshash
   189  
   190  func MemclrBytes(b []byte) {
   191  	s := (*slice)(unsafe.Pointer(&b))
   192  	memclrNoHeapPointers(s.array, uintptr(s.len))
   193  }
   194  
   195  var HashLoad = &hashLoad
   196  
   197  // entry point for testing
   198  func GostringW(w []uint16) (s string) {
   199  	systemstack(func() {
   200  		s = gostringw(&w[0])
   201  	})
   202  	return
   203  }
   204  
   205  type Uintreg sys.Uintreg
   206  
   207  var Open = open
   208  var Close = closefd
   209  var Read = read
   210  var Write = write
   211  
   212  func Envs() []string		{ return envs }
   213  func SetEnvs(e []string)	{ envs = e }
   214  
   215  var BigEndian = sys.BigEndian
   216  
   217  // For benchmarking.
   218  
   219  func BenchSetType(n int, x interface{}) {
   220  	e := *efaceOf(&x)
   221  	t := e._type
   222  	var size uintptr
   223  	var p unsafe.Pointer
   224  	switch t.kind & kindMask {
   225  	case kindPtr:
   226  		t = (*ptrtype)(unsafe.Pointer(t)).elem
   227  		size = t.size
   228  		p = e.data
   229  	case kindSlice:
   230  		slice := *(*struct {
   231  			ptr		unsafe.Pointer
   232  			len, cap	uintptr
   233  		})(e.data)
   234  		t = (*slicetype)(unsafe.Pointer(t)).elem
   235  		size = t.size * slice.len
   236  		p = slice.ptr
   237  	}
   238  	allocSize := roundupsize(size)
   239  	systemstack(func() {
   240  		for i := 0; i < n; i++ {
   241  			heapBitsSetType(uintptr(p), allocSize, size, t)
   242  		}
   243  	})
   244  }
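
// Illustrative sketch (assumption, not from this file): BenchSetType is intended
// to be called from a benchmark in package runtime_test with b.N and a value
// whose type's heap bitmap should be written repeatedly. The benchmark name and
// node type below are hypothetical; the usual testing and unsafe imports are assumed.
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		type node struct{ left, right *node }
//		b.SetBytes(int64(unsafe.Sizeof(node{})))
//		runtime.BenchSetType(b.N, new(node))
//	}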
   245  
   246  const PtrSize = sys.PtrSize
   247  
   248  var ForceGCPeriod = &forcegcperiod
   249  
   250  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   251  // the "environment" traceback level, so later calls to
   252  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   253  func SetTracebackEnv(level string) {
   254  	setTraceback(level)
   255  	traceback_env = traceback_cache
   256  }
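
// Illustrative sketch (hypothetical usage, not from this file): a test harness
// could raise the environment traceback level once, before running any tests,
// so that later debug.SetTraceback calls cannot lower it. Assumes package
// runtime_test with the os and testing imports.
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system")
//		os.Exit(m.Run())
//	}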
   257  
   258  var ReadUnaligned32 = readUnaligned32
   259  var ReadUnaligned64 = readUnaligned64
   260  
   261  func CountPagesInUse() (pagesInUse, counted uintptr) {
   262  	stopTheWorld("CountPagesInUse")
   263  
   264  	pagesInUse = uintptr(mheap_.pagesInUse)
   265  
   266  	for _, s := range mheap_.allspans {
   267  		if s.state.get() == mSpanInUse {
   268  			counted += s.npages
   269  		}
   270  	}
   271  
   272  	startTheWorld()
   273  
   274  	return
   275  }
   276  
   277  func Fastrand() uint32		{ return fastrand() }
   278  func Fastrandn(n uint32) uint32	{ return fastrandn(n) }
   279  
   280  type ProfBuf profBuf
   281  
   282  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   283  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   284  }
   285  
   286  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   287  	(*profBuf)(p).write(tag, now, hdr, stk)
   288  }
   289  
   290  const (
   291  	ProfBufBlocking		= profBufBlocking
   292  	ProfBufNonBlocking	= profBufNonBlocking
   293  )
   294  
   295  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   296  	return (*profBuf)(p).read(profBufReadMode(mode))
   297  }
   298  
   299  func (p *ProfBuf) Close() {
   300  	(*profBuf)(p).close()
   301  }
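
// Illustrative sketch (made-up sizes and values, not from this file) of the
// ProfBuf wrapper API above, as it might be exercised from package runtime_test
// (unsafe import assumed): two header words per record, a 512-word data ring,
// and one tag slot.
//
//	b := runtime.NewProfBuf(2, 512, 1)
//	var tag unsafe.Pointer
//	b.Write(&tag, runtime.Nanotime(), []uint64{1, 2}, []uintptr{0x40, 0x41})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()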
   302  
   303  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   304  // MemStats accumulated by scanning the heap.
   305  func ReadMemStatsSlow() (base, slow MemStats) {
   306  	stopTheWorld("ReadMemStatsSlow")
   307  
   308  	// Run on the system stack to avoid stack growth allocation.
   309  	systemstack(func() {
   310  		// Make sure stats don't change.
   311  		getg().m.mallocing++
   312  
   313  		readmemstats_m(&base)
   314  
   315  		// Initialize slow from base and zero the fields we're
   316  		// recomputing.
   317  		slow = base
   318  		slow.Alloc = 0
   319  		slow.TotalAlloc = 0
   320  		slow.Mallocs = 0
   321  		slow.Frees = 0
   322  		slow.HeapReleased = 0
   323  		var bySize [_NumSizeClasses]struct {
   324  			Mallocs, Frees uint64
   325  		}
   326  
   327  		// Add up current allocations in spans.
   328  		for _, s := range mheap_.allspans {
   329  			if s.state.get() != mSpanInUse {
   330  				continue
   331  			}
   332  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   333  				slow.Mallocs++
   334  				slow.Alloc += uint64(s.elemsize)
   335  			} else {
   336  				slow.Mallocs += uint64(s.allocCount)
   337  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   338  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   339  			}
   340  		}
   341  
   342  		// Add in frees. readmemstats_m flushed the cached stats, so
   343  		// these are up-to-date.
   344  		var smallFree uint64
   345  		slow.Frees = mheap_.nlargefree
   346  		for i := range mheap_.nsmallfree {
   347  			slow.Frees += mheap_.nsmallfree[i]
   348  			bySize[i].Frees = mheap_.nsmallfree[i]
   349  			bySize[i].Mallocs += mheap_.nsmallfree[i]
   350  			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
   351  		}
   352  		slow.Frees += memstats.tinyallocs
   353  		slow.Mallocs += slow.Frees
   354  
   355  		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree
   356  
   357  		for i := range slow.BySize {
   358  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   359  			slow.BySize[i].Frees = bySize[i].Frees
   360  		}
   361  
   362  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   363  			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
   364  			slow.HeapReleased += uint64(pg) * pageSize
   365  		}
   366  		for _, p := range allp {
   367  			pg := sys.OnesCount64(p.pcache.scav)
   368  			slow.HeapReleased += uint64(pg) * pageSize
   369  		}
   370  
   371  		// Unused space in the current arena also counts as released space.
   372  		slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
   373  
   374  		getg().m.mallocing--
   375  	})
   376  
   377  	startTheWorld()
   378  	return
   379  }
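
// Illustrative sketch (hypothetical test fragment, not from this file): a
// consistency check can compare the two views field by field, for example:
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.HeapReleased != slow.HeapReleased {
//		t.Errorf("HeapReleased mismatch: runtime reports %d, heap scan finds %d",
//			base.HeapReleased, slow.HeapReleased)
//	}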
   380  
   381  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   382  // stderr, and blocks in a stack containing
   383  // "runtime.blockOnSystemStackInternal".
   384  func BlockOnSystemStack() {
   385  	systemstack(blockOnSystemStackInternal)
   386  }
   387  
   388  func blockOnSystemStackInternal() {
   389  	print("x\n")
   390  	lock(&deadlock)
   391  	lock(&deadlock)
   392  }
   393  
   394  type RWMutex struct {
   395  	rw rwmutex
   396  }
   397  
   398  func (rw *RWMutex) RLock() {
   399  	rw.rw.rlock()
   400  }
   401  
   402  func (rw *RWMutex) RUnlock() {
   403  	rw.rw.runlock()
   404  }
   405  
   406  func (rw *RWMutex) Lock() {
   407  	rw.rw.lock()
   408  }
   409  
   410  func (rw *RWMutex) Unlock() {
   411  	rw.rw.unlock()
   412  }
   413  
   414  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   415  
   416  func MapBucketsCount(m map[int]int) int {
   417  	h := *(**hmap)(unsafe.Pointer(&m))
   418  	return 1 << h.B
   419  }
   420  
   421  func MapBucketsPointerIsNil(m map[int]int) bool {
   422  	h := *(**hmap)(unsafe.Pointer(&m))
   423  	return h.buckets == nil
   424  }
   425  
   426  func LockOSCounts() (external, internal uint32) {
   427  	g := getg()
   428  	if g.m.lockedExt+g.m.lockedInt == 0 {
   429  		if g.lockedm != 0 {
   430  			panic("lockedm on non-locked goroutine")
   431  		}
   432  	} else {
   433  		if g.lockedm == 0 {
   434  			panic("nil lockedm on locked goroutine")
   435  		}
   436  	}
   437  	return g.m.lockedExt, g.m.lockedInt
   438  }
   439  
   440  //go:noinline
   441  func TracebackSystemstack(stk []uintptr, i int) int {
   442  	if i == 0 {
   443  		pc, sp := getcallerpc(), getcallersp()
   444  		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
   445  	}
   446  	n := 0
   447  	systemstack(func() {
   448  		n = TracebackSystemstack(stk, i-1)
   449  	})
   450  	return n
   451  }
   452  
   453  func KeepNArenaHints(n int) {
   454  	hint := mheap_.arenaHints
   455  	for i := 1; i < n; i++ {
   456  		hint = hint.next
   457  		if hint == nil {
   458  			return
   459  		}
   460  	}
   461  	hint.next = nil
   462  }
   463  
   464  // MapNextArenaHint reserves a page at the next arena growth hint,
   465  // preventing the arena from growing there, and returns the range of
   466  // addresses that are no longer viable.
   467  func MapNextArenaHint() (start, end uintptr) {
   468  	hint := mheap_.arenaHints
   469  	addr := hint.addr
   470  	if hint.down {
   471  		start, end = addr-heapArenaBytes, addr
   472  		addr -= physPageSize
   473  	} else {
   474  		start, end = addr, addr+heapArenaBytes
   475  	}
   476  	sysReserve(unsafe.Pointer(addr), physPageSize)
   477  	return
   478  }
   479  
   480  func GetNextArenaHint() uintptr {
   481  	return mheap_.arenaHints.addr
   482  }
   483  
   484  type G = g
   485  
   486  func Getg() *G {
   487  	return getg()
   488  }
   489  
   490  //go:noinline
   491  func PanicForTesting(b []byte, i int) byte {
   492  	return unexportedPanicForTesting(b, i)
   493  }
   494  
   495  //go:noinline
   496  func unexportedPanicForTesting(b []byte, i int) byte {
   497  	return b[i]
   498  }
   499  
   500  func G0StackOverflow() {
   501  	systemstack(func() {
   502  		stackOverflow(nil)
   503  	})
   504  }
   505  
   506  func stackOverflow(x *byte) {
   507  	var buf [256]byte
   508  	stackOverflow(&buf[0])
   509  }
   510  
   511  func MapTombstoneCheck(m map[int]int) {
   512  	// Make sure emptyOne and emptyRest are distributed correctly.
   513  	// We should have a series of filled and emptyOne cells, followed by
   514  	// a series of emptyRest cells.
   515  	h := *(**hmap)(unsafe.Pointer(&m))
   516  	i := interface{}(m)
   517  	t := *(**maptype)(unsafe.Pointer(&i))
   518  
   519  	for x := 0; x < 1<<h.B; x++ {
   520  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
   521  		n := 0
   522  		for b := b0; b != nil; b = b.overflow(t) {
   523  			for i := 0; i < bucketCnt; i++ {
   524  				if b.tophash[i] != emptyRest {
   525  					n++
   526  				}
   527  			}
   528  		}
   529  		k := 0
   530  		for b := b0; b != nil; b = b.overflow(t) {
   531  			for i := 0; i < bucketCnt; i++ {
   532  				if k < n && b.tophash[i] == emptyRest {
   533  					panic("early emptyRest")
   534  				}
   535  				if k >= n && b.tophash[i] != emptyRest {
   536  					panic("late non-emptyRest")
   537  				}
   538  				if k == n-1 && b.tophash[i] == emptyOne {
   539  					panic("last non-emptyRest entry is emptyOne")
   540  				}
   541  				k++
   542  			}
   543  		}
   544  	}
   545  }
   546  
   547  func RunGetgThreadSwitchTest() {
   548  	// Test that getg works correctly across a thread switch.
   549  	// With gccgo, if getg is generated inline, the backend
   550  	// may cache the address of the TLS variable, which
   551  	// will become invalid after a thread switch. This test
   552  	// checks that this bad caching doesn't happen.
   553  
   554  	ch := make(chan int)
   555  	go func(ch chan int) {
   556  		ch <- 5
   557  		LockOSThread()
   558  	}(ch)
   559  
   560  	g1 := getg()
   561  
   562  	// Block on a receive. This is likely to get us a thread
   563  	// switch. If we yield to the sender goroutine, it will
   564  	// lock the thread, forcing us to resume on a different
   565  	// thread.
   566  	<-ch
   567  
   568  	g2 := getg()
   569  	if g1 != g2 {
   570  		panic("g1 != g2")
   571  	}
   572  
   573  	// Also test getg after some control flow, as the
   574  	// backend is sensitive to control flow.
   575  	g3 := getg()
   576  	if g1 != g3 {
   577  		panic("g1 != g3")
   578  	}
   579  }
   580  
   581  const (
   582  	PageSize		= pageSize
   583  	PallocChunkPages	= pallocChunkPages
   584  	PageAlloc64Bit		= pageAlloc64Bit
   585  	PallocSumBytes		= pallocSumBytes
   586  )
   587  
   588  // Expose pallocSum for testing.
   589  type PallocSum pallocSum
   590  
   591  func PackPallocSum(start, max, end uint) PallocSum	{ return PallocSum(packPallocSum(start, max, end)) }
   592  func (m PallocSum) Start() uint				{ return pallocSum(m).start() }
   593  func (m PallocSum) Max() uint				{ return pallocSum(m).max() }
   594  func (m PallocSum) End() uint				{ return pallocSum(m).end() }
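
// Illustrative sketch (not from this file): a packed summary round-trips through
// its accessors, so for small in-range values:
//
//	s := runtime.PackPallocSum(1, 2, 3)
//	_ = s.Start()	// 1
//	_ = s.Max()	// 2
//	_ = s.End()	// 3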
   595  
   596  // Expose pallocBits for testing.
   597  type PallocBits pallocBits
   598  
   599  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   600  	return (*pallocBits)(b).find(npages, searchIdx)
   601  }
   602  func (b *PallocBits) AllocRange(i, n uint)		{ (*pallocBits)(b).allocRange(i, n) }
   603  func (b *PallocBits) Free(i, n uint)			{ (*pallocBits)(b).free(i, n) }
   604  func (b *PallocBits) Summarize() PallocSum		{ return PallocSum((*pallocBits)(b).summarize()) }
   605  func (b *PallocBits) PopcntRange(i, n uint) uint	{ return (*pageBits)(b).popcntRange(i, n) }
   606  
   607  // SummarizeSlow is a slow but more obviously correct implementation
   608  // of (*pallocBits).summarize. Used for testing.
   609  func SummarizeSlow(b *PallocBits) PallocSum {
   610  	var start, max, end uint
   611  
   612  	const N = uint(len(b)) * 64
   613  	for start < N && (*pageBits)(b).get(start) == 0 {
   614  		start++
   615  	}
   616  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   617  		end++
   618  	}
   619  	run := uint(0)
   620  	for i := uint(0); i < N; i++ {
   621  		if (*pageBits)(b).get(i) == 0 {
   622  			run++
   623  		} else {
   624  			run = 0
   625  		}
   626  		if run > max {
   627  			max = run
   628  		}
   629  	}
   630  	return PackPallocSum(start, max, end)
   631  }
   632  
   633  // Expose non-trivial helpers for testing.
   634  func FindBitRange64(c uint64, n uint) uint	{ return findBitRange64(c, n) }
   635  
   636  // Given two PallocBits, returns a set of bit ranges where
   637  // they differ.
   638  func DiffPallocBits(a, b *PallocBits) []BitRange {
   639  	ba := (*pageBits)(a)
   640  	bb := (*pageBits)(b)
   641  
   642  	var d []BitRange
   643  	base, size := uint(0), uint(0)
   644  	for i := uint(0); i < uint(len(ba))*64; i++ {
   645  		if ba.get(i) != bb.get(i) {
   646  			if size == 0 {
   647  				base = i
   648  			}
   649  			size++
   650  		} else {
   651  			if size != 0 {
   652  				d = append(d, BitRange{base, size})
   653  			}
   654  			size = 0
   655  		}
   656  	}
   657  	if size != 0 {
   658  		d = append(d, BitRange{base, size})
   659  	}
   660  	return d
   661  }
   662  
   663  // StringifyPallocBits gets the bits in the bit range r from b,
   664  // and returns a string containing the bits as ASCII 0 and 1
   665  // characters.
   666  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   667  	str := ""
   668  	for j := r.I; j < r.I+r.N; j++ {
   669  		if (*pageBits)(b).get(j) != 0 {
   670  			str += "1"
   671  		} else {
   672  			str += "0"
   673  		}
   674  	}
   675  	return str
   676  }
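
// Illustrative sketch (hypothetical helper, not from this file): DiffPallocBits
// and StringifyPallocBits combine naturally when reporting bitmap mismatches in
// a test:
//
//	func checkPallocBits(t *testing.T, got, want *runtime.PallocBits) {
//		d := runtime.DiffPallocBits(got, want)
//		if len(d) != 0 {
//			t.Errorf("bits differ in ranges %v", d)
//			for _, bits := range d {
//				t.Logf("got:  %s", runtime.StringifyPallocBits(got, bits))
//				t.Logf("want: %s", runtime.StringifyPallocBits(want, bits))
//			}
//		}
//	}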
   677  
   678  // Expose pallocData for testing.
   679  type PallocData pallocData
   680  
   681  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   682  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   683  }
   684  func (d *PallocData) AllocRange(i, n uint)	{ (*pallocData)(d).allocRange(i, n) }
   685  func (d *PallocData) ScavengedSetRange(i, n uint) {
   686  	(*pallocData)(d).scavenged.setRange(i, n)
   687  }
   688  func (d *PallocData) PallocBits() *PallocBits {
   689  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   690  }
   691  func (d *PallocData) Scavenged() *PallocBits {
   692  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   693  }
   694  
   695  // Expose fillAligned for testing.
   696  func FillAligned(x uint64, m uint) uint64	{ return fillAligned(x, m) }
   697  
   698  // Expose pageCache for testing.
   699  type PageCache pageCache
   700  
   701  const PageCachePages = pageCachePages
   702  
   703  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   704  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   705  }
   706  func (c *PageCache) Empty() bool	{ return (*pageCache)(c).empty() }
   707  func (c *PageCache) Base() uintptr	{ return (*pageCache)(c).base }
   708  func (c *PageCache) Cache() uint64	{ return (*pageCache)(c).cache }
   709  func (c *PageCache) Scav() uint64	{ return (*pageCache)(c).scav }
   710  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   711  	return (*pageCache)(c).alloc(npages)
   712  }
   713  func (c *PageCache) Flush(s *PageAlloc) {
   714  	(*pageCache)(c).flush((*pageAlloc)(s))
   715  }
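
// Illustrative sketch (made-up bitmap values, not from this file): a PageCache
// built from an explicit bitmap hands out pages from its 64-page window.
//
//	base := runtime.PageBase(runtime.BaseChunkIdx, 0)
//	c := runtime.NewPageCache(base, ^uint64(0), 0)	// all 64 pages free, none scavenged
//	addr, scav := c.Alloc(1)			// addr == base, scav == 0
//	_, _ = addr, scav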
   716  
   717  // Expose chunk index type.
   718  type ChunkIdx chunkIdx
   719  
   720  // Expose pageAlloc for testing. Note that because pageAlloc is
   721  // not in the heap, neither is PageAlloc.
   722  type PageAlloc pageAlloc
   723  
   724  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   725  	return (*pageAlloc)(p).alloc(npages)
   726  }
   727  func (p *PageAlloc) AllocToCache() PageCache {
   728  	return PageCache((*pageAlloc)(p).allocToCache())
   729  }
   730  func (p *PageAlloc) Free(base, npages uintptr) {
   731  	(*pageAlloc)(p).free(base, npages)
   732  }
   733  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   734  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   735  }
   736  func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
   737  	systemstack(func() {
   738  		r = (*pageAlloc)(p).scavenge(nbytes, locked)
   739  	})
   740  	return
   741  }
   742  func (p *PageAlloc) InUse() []AddrRange {
   743  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   744  	for _, r := range p.inUse.ranges {
   745  		ranges = append(ranges, AddrRange{
   746  			Base:	r.base,
   747  			Limit:	r.limit,
   748  		})
   749  	}
   750  	return ranges
   751  }
   752  
   753  // Returns nil if the PallocData's L2 is missing.
   754  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   755  	ci := chunkIdx(i)
   756  	l2 := (*pageAlloc)(p).chunks[ci.l1()]
   757  	if l2 == nil {
   758  		return nil
   759  	}
   760  	return (*PallocData)(&l2[ci.l2()])
   761  }
   762  
   763  // AddrRange represents a range over addresses.
   764  // Specifically, it represents the range [Base, Limit).
   765  type AddrRange struct {
   766  	Base, Limit uintptr
   767  }
   768  
   769  // BitRange represents a range over a bitmap.
   770  type BitRange struct {
   771  	I, N uint	// bit index and length in bits
   772  }
   773  
   774  // NewPageAlloc creates a new page allocator for testing and
   775  // initializes it with the scav and chunks maps. Each key in these maps
   776  // represents a chunk index and each value is a series of bit ranges to
   777  // set within each bitmap's chunk.
   778  //
   779  // The initialization of the pageAlloc preserves the invariant that if a
   780  // scavenged bit is set the alloc bit is necessarily unset, so some
   781  // of the bits described by scav may be cleared in the final bitmap if
   782  // ranges in chunks overlap with them.
   783  //
   784  // scav is optional, and if nil, the scavenged bitmap will be cleared
   785  // (as opposed to all 1s, which it usually is). Furthermore, every
   786  // chunk index in scav must appear in chunks; ones that do not are
   787  // ignored.
   788  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
   789  	p := new(pageAlloc)
   790  
   791  	// We've got an entry, so initialize the pageAlloc.
   792  	p.init(new(mutex), nil)
   793  	p.test = true
   794  
   795  	for i, init := range chunks {
   796  		addr := chunkBase(chunkIdx(i))
   797  
   798  		// Mark the chunk's existence in the pageAlloc.
   799  		p.grow(addr, pallocChunkBytes)
   800  
   801  		// Initialize the bitmap and update pageAlloc metadata.
   802  		chunk := p.chunkOf(chunkIndex(addr))
   803  
   804  		// Clear all the scavenged bits that the grow call above set.
   805  		chunk.scavenged.clearRange(0, pallocChunkPages)
   806  
   807  		// Apply scavenge state if applicable.
   808  		if scav != nil {
   809  			if scvg, ok := scav[i]; ok {
   810  				for _, s := range scvg {
   811  					// Ignore the case of s.N == 0. setRange doesn't handle
   812  					// it and it's a no-op anyway.
   813  					if s.N != 0 {
   814  						chunk.scavenged.setRange(s.I, s.N)
   815  					}
   816  				}
   817  			}
   818  		}
   819  		p.resetScavengeAddr()
   820  
   821  		// Apply alloc state.
   822  		for _, s := range init {
   823  			// Ignore the case of s.N == 0. allocRange doesn't handle
   824  			// it and it's a no-op anyway.
   825  			if s.N != 0 {
   826  				chunk.allocRange(s.I, s.N)
   827  			}
   828  		}
   829  
   830  		// Update heap metadata for the allocRange calls above.
   831  		p.update(addr, pallocChunkPages, false, false)
   832  	}
   833  	return (*PageAlloc)(p)
   834  }
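
// Illustrative sketch (hypothetical test setup, not from this file): the maps
// passed to NewPageAlloc describe per-chunk bit ranges, so a single fully free,
// fully scavenged chunk at BaseChunkIdx could be set up and exercised like this:
//
//	b := runtime.NewPageAlloc(map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {},
//	}, map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {{0, runtime.PallocChunkPages}},
//	})
//	defer runtime.FreePageAlloc(b)
//
//	addr, scav := b.Alloc(1)	// first page of the chunk; scav reports it was scavenged
//	_, _ = addr, scav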
   835  
   836  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
   837  // is called the pageAlloc may no longer be used. The object itself will be
   838  // collected by the garbage collector once it is no longer live.
   839  func FreePageAlloc(pp *PageAlloc) {
   840  	p := (*pageAlloc)(pp)
   841  
   842  	// Free all the mapped space for the summary levels.
   843  	if pageAlloc64Bit != 0 {
   844  		for l := 0; l < summaryLevels; l++ {
   845  			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
   846  		}
   847  	} else {
   848  		resSize := uintptr(0)
   849  		for _, s := range p.summary {
   850  			resSize += uintptr(cap(s)) * pallocSumBytes
   851  		}
   852  		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
   853  	}
   854  
   855  	// Free the mapped space for chunks.
   856  	for i := range p.chunks {
   857  		if x := p.chunks[i]; x != nil {
   858  			p.chunks[i] = nil
   859  			// This memory comes from sysAlloc and will always be page-aligned.
   860  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
   861  		}
   862  	}
   863  }
   864  
   865  // BaseChunkIdx is a convenient chunkIdx value which works on both
   866  // 64 bit and 32 bit platforms, allowing the tests to share code
   867  // between the two.
   868  //
   869  // On AIX, the arenaBaseOffset is 0x0a00000000000000. However, this
   870  // constant can't be used here because it is negative and will cause
   871  // a constant overflow.
   872  //
   873  // This should not be higher than 0x100*pallocChunkBytes to support
   874  // mips and mipsle, which only have 31-bit address spaces.
   875  var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + 0x0a00000000000000*sys.GoosAix))
   876  
   877  // PageBase returns an address given a chunk index and a page index
   878  // relative to that chunk.
   879  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
   880  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
   881  }
   882  
   883  type BitsMismatch struct {
   884  	Base		uintptr
   885  	Got, Want	uint64
   886  }
   887  
   888  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
   889  	ok = true
   890  
   891  	// Run on the system stack to avoid stack growth allocation.
   892  	systemstack(func() {
   893  		getg().m.mallocing++
   894  
   895  		// Lock so that we can safely access the bitmap.
   896  		lock(&mheap_.lock)
   897  	chunkLoop:
   898  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   899  			chunk := mheap_.pages.chunkOf(i)
   900  			for j := 0; j < pallocChunkPages/64; j++ {
   901  				// Run over each 64-bit bitmap section and ensure
   902  				// scavenged is being cleared properly on allocation.
   903  				// If a used bit and scavenged bit are both set, that's
   904  				// an error, and could indicate a larger problem, or
   905  				// an accounting problem.
   906  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
   907  				got := chunk.scavenged[j]
   908  				if want != got {
   909  					ok = false
   910  					if n >= len(mismatches) {
   911  						break chunkLoop
   912  					}
   913  					mismatches[n] = BitsMismatch{
   914  						Base:	chunkBase(i) + uintptr(j)*64*pageSize,
   915  						Got:	got,
   916  						Want:	want,
   917  					}
   918  					n++
   919  				}
   920  			}
   921  		}
   922  		unlock(&mheap_.lock)
   923  
   924  		getg().m.mallocing--
   925  	})
   926  	return
   927  }
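
// Illustrative sketch (hypothetical test fragment, not from this file): callers
// pass a pre-allocated slice to bound how many mismatches are reported.
//
//	mismatches := make([]runtime.BitsMismatch, 128)
//	if n, ok := runtime.CheckScavengedBitsCleared(mismatches); !ok {
//		for _, m := range mismatches[:n] {
//			t.Errorf("page base %x: got scavenged bits %016x, want %016x", m.Base, m.Got, m.Want)
//		}
//	}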
   928  
   929  func PageCachePagesLeaked() (leaked uintptr) {
   930  	stopTheWorld("PageCachePagesLeaked")
   931  
   932  	// Walk over destroyed Ps and look for unflushed caches.
   933  	deadp := allp[len(allp):cap(allp)]
   934  	for _, p := range deadp {
   935  		// Since we're going past len(allp) we may see nil Ps.
   936  		// Just ignore them.
   937  		if p != nil {
   938  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
   939  		}
   940  	}
   941  
   942  	startTheWorld()
   943  	return
   944  }
   945  
   946  var Semacquire = semacquire
   947  var Semrelease1 = semrelease1
   948  
   949  func SemNwait(addr *uint32) uint32 {
   950  	root := semroot(addr)
   951  	return atomic.Load(&root.nwait)
   952  }