github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
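
// An illustrative sketch of driving LFStackPush/LFStackPop from an external
// test (hypothetical TestLFStackSketch in a runtime_test-style package that
// imports "runtime" and "testing"; not part of this file). The lock-free
// stack holds untyped pointers, so the test keeps its own reference to the
// node to make sure it stays alive.
//
//	func TestLFStackSketch(t *testing.T) {
//		head := new(uint64)
//		node := &runtime.LFNode{} // kept alive by this local reference
//		if runtime.LFStackPop(head) != nil {
//			t.Fatal("new stack is not empty")
//		}
//		runtime.LFStackPush(head, node)
//		if got := runtime.LFStackPop(head); got != node {
//			t.Fatalf("popped %p, want %p", got, node)
//		}
//	}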

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}
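
// A sketch of how a test might inspect GCMask output (hypothetical names,
// external runtime_test-style package assumed). GCMask reports one byte per
// pointer-sized word of the referenced object, 1 for words the GC treats as
// pointers; the exact trimming of trailing scalar words may vary.
//
//	var gcMaskSink interface{}
//
//	func TestGCMaskSketch(t *testing.T) {
//		type T struct {
//			p *int
//			x int
//		}
//		v := new(T)
//		gcMaskSink = v // force v to escape to the heap
//		mask := runtime.GCMask(v)
//		if len(mask) == 0 || mask[0] != 1 {
//			t.Fatalf("unexpected mask %v for %T", mask, v)
//		}
//	}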

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps can coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}
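
// The three RunSchedLocalQueue* helpers above are self-checking (they throw
// on failure), so the corresponding external tests can be thin wrappers along
// these lines (hypothetical names, runtime_test-style package assumed):
//
//	func TestSchedLocalQueueSketch(t *testing.T)      { runtime.RunSchedLocalQueueTest() }
//	func TestSchedLocalQueueStealSketch(t *testing.T) { runtime.RunSchedLocalQueueStealTest() }
//	func TestSchedLocalQueueEmptySketch(t *testing.T) { runtime.RunSchedLocalQueueEmptyTest(1000) }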

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)
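
// A sketch of exercising the exported hash functions from an external test
// (hypothetical name; assumes the underlying signature takes the value plus a
// uintptr seed and returns a uintptr, as stringHash does in this runtime):
//
//	func TestStringHashSketch(t *testing.T) {
//		h1 := runtime.StringHash("hello", 0)
//		h2 := runtime.StringHash("hello", 0)
//		if h1 != h2 {
//			t.Fatal("equal strings with equal seeds hashed differently")
//		}
//		if runtime.StringHash("hello", 1) == h1 {
//			t.Log("different seeds collided; possible but unlikely")
//		}
//	}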

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}
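
// MemclrBytes zeroes the bytes of b in place. A minimal external-test sketch
// (hypothetical name):
//
//	func TestMemclrBytesSketch(t *testing.T) {
//		b := []byte{1, 2, 3, 4}
//		runtime.MemclrBytes(b)
//		for i, x := range b {
//			if x != 0 {
//				t.Fatalf("b[%d] = %d, want 0", i, x)
//			}
//		}
//	}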

var HashLoad = &hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
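
// BenchSetType repeatedly rewrites the heap bitmap for the object (or slice)
// that x refers to. A benchmark sketch from an external test package
// (hypothetical name; only the pointer and slice kinds handled above are
// meaningful arguments):
//
//	func BenchmarkSetTypePtrSketch(b *testing.B) {
//		runtime.BenchSetType(b.N, new(*byte))
//	}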

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
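
// Typical use is to pin the traceback level for an entire test binary, e.g.
// from TestMain (sketch; assumes "os" is imported in the test package):
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system") // debug.SetTraceback can no longer lower it
//		os.Exit(m.Run())
//	}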

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
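
// CountPagesInUse lets a test cross-check the mheap_.pagesInUse counter
// against a direct scan of in-use spans (sketch, hypothetical name):
//
//	func TestCountPagesInUseSketch(t *testing.T) {
//		pagesInUse, counted := runtime.CountPagesInUse()
//		if pagesInUse != counted {
//			t.Fatalf("pagesInUse = %d, but direct span count = %d", pagesInUse, counted)
//		}
//	}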

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
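
// A sketch of the ProfBuf write/read cycle from an external test (hypothetical
// name; assumes "unsafe" and "time" are imported there). The buffer is created
// with 2 header words, so each Write supplies a 2-element hdr slice.
//
//	func TestProfBufSketch(t *testing.T) {
//		b := runtime.NewProfBuf(2, 1<<14, 100)
//		tag := unsafe.Pointer(new(int))
//		b.Write(&tag, time.Now().UnixNano(), []uint64{1, 2}, []uintptr{0x100, 0x101})
//		data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//		if eof || len(data) == 0 || len(tags) == 0 {
//			t.Fatalf("unexpected read: %d data words, %d tags, eof=%v", len(data), len(tags), eof)
//		}
//		b.Close()
//	}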

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.scav.start(); i.valid(); i = i.next() {
			slow.HeapReleased += uint64(i.span().released())
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
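
// The intended use is to compare the runtime's accounting against the heap
// scan (sketch, hypothetical name):
//
//	func TestReadMemStatsSlowSketch(t *testing.T) {
//		base, slow := runtime.ReadMemStatsSlow()
//		if base.Alloc != slow.Alloc {
//			t.Fatalf("Alloc: runtime reports %d, heap scan finds %d", base.Alloc, slow.Alloc)
//		}
//	}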

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
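
// The RWMutex wrapper exposes the runtime's internal rwmutex; a trivial
// external-test sketch (hypothetical name):
//
//	func TestRWMutexSketch(t *testing.T) {
//		var rw runtime.RWMutex
//		rw.RLock()
//		rw.RUnlock()
//		rw.Lock()
//		rw.Unlock()
//	}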

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
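
// A sketch of checking bucket growth through these helpers (hypothetical
// name; the exact bucket count depends on the map's load factor, so the
// bound below is deliberately loose):
//
//	func TestMapBucketsSketch(t *testing.T) {
//		m := make(map[int]int)
//		for i := 0; i < 1000; i++ {
//			m[i] = i
//		}
//		if runtime.MapBucketsPointerIsNil(m) {
//			t.Fatal("bucket array still nil after inserts")
//		}
//		if n := runtime.MapBucketsCount(m); n < 128 {
//			t.Fatalf("only %d buckets for 1000 entries", n)
//		}
//	}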

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}
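
// LockOSCounts reports the current goroutine's external (LockOSThread) and
// internal thread-lock counts. A sketch of checking the external count
// (hypothetical name):
//
//	func TestLockOSCountsSketch(t *testing.T) {
//		runtime.LockOSThread()
//		if ext, _ := runtime.LockOSCounts(); ext != 1 {
//			t.Fatalf("external lock count = %d, want 1", ext)
//		}
//		runtime.UnlockOSThread()
//	}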

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

func Getg() *G {
	return getg()
}

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := interface{}(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}
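
// MapTombstoneCheck panics if the tombstone invariant is violated, so a test
// only needs to build a map with plenty of deletions and call it (sketch,
// hypothetical name):
//
//	func TestMapTombstonesSketch(t *testing.T) {
//		m := make(map[int]int)
//		for i := 0; i < 1000; i++ {
//			m[i] = i
//		}
//		for i := 0; i < 1000; i++ {
//			delete(m, i)
//		}
//		runtime.MapTombstoneCheck(m)
//	}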

// Span is a safe wrapper around an mspan, whose memory
// is managed manually.
type Span struct {
	*mspan
}

func AllocSpan(base, npages uintptr) Span {
	lock(&mheap_.lock)
	s := (*mspan)(mheap_.spanalloc.alloc())
	unlock(&mheap_.lock)
	s.init(base, npages)
	return Span{s}
}

func (s *Span) Free() {
	lock(&mheap_.lock)
	mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
	unlock(&mheap_.lock)
	s.mspan = nil
}

func (s Span) Base() uintptr {
	return s.mspan.base()
}

func (s Span) Pages() uintptr {
	return s.mspan.npages
}

type TreapIter struct {
	treapIter
}

func (t TreapIter) Span() Span {
	return Span{t.span()}
}

func (t TreapIter) Valid() bool {
	return t.valid()
}

func (t TreapIter) Next() TreapIter {
	return TreapIter{t.next()}
}

func (t TreapIter) Prev() TreapIter {
	return TreapIter{t.prev()}
}

// Treap is a safe wrapper around mTreap for testing.
//
// It must never be heap-allocated because mTreap is
// notinheap.
//
//go:notinheap
type Treap struct {
	mTreap
}

func (t *Treap) Start() TreapIter {
	return TreapIter{t.start()}
}

func (t *Treap) End() TreapIter {
	return TreapIter{t.end()}
}

func (t *Treap) Insert(s Span) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// allocation which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	lock(&mheap_.lock)
	t.insert(s.mspan)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) Find(npages uintptr) TreapIter {
	return TreapIter{treapIter{t.find(npages)}}
}

func (t *Treap) Erase(i TreapIter) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// freeing which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	lock(&mheap_.lock)
	t.erase(i.treapIter)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) RemoveSpan(s Span) {
	// See Erase about locking.
	lock(&mheap_.lock)
	t.removeSpan(s.mspan)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) Size() int {
	i := 0
	t.mTreap.treap.walkTreap(func(t *treapNode) {
		i++
	})
	return i
}

func (t *Treap) CheckInvariants() {
	t.mTreap.treap.walkTreap(checkTreapNode)
}
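
// A sketch tying the Span and Treap wrappers together from an external test
// (hypothetical name and base address). Note that Treap values must live on
// the stack or in globals, never on the heap, because mTreap is notinheap.
//
//	func TestTreapSketch(t *testing.T) {
//		var tr runtime.Treap // not heap-allocated
//		s := runtime.AllocSpan(0x1000, 4)
//		tr.Insert(s)
//		if i := tr.Find(4); !i.Valid() || i.Span().Pages() != 4 {
//			t.Fatal("failed to find the inserted 4-page span")
//		}
//		if tr.Size() != 1 {
//			t.Fatalf("treap size = %d, want 1", tr.Size())
//		}
//		tr.RemoveSpan(s)
//		s.Free()
//	}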