github.com/eun/go@v0.0.0-20170811110501-92cfd07a6cfd/src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

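// LFNode is an exported version of lfnode for testing the runtime's
// lock-free stack.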
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

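// LFStackPush pushes node onto the lock-free stack whose head word is *head.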
func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

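// LFStackPop pops a node from the lock-free stack whose head word is *head,
// or returns nil if the stack is empty.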
func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

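// GCMask returns the GC pointer mask of the object that x refers to,
// computed on the system stack.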
func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

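// RunSchedLocalQueueTest exercises runqput and runqget on a P's local run
// queue: for each length i it pushes i goroutines, pops them back, and
// throws if the queue returns a wrong element or is not empty afterwards.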
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

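// RunSchedLocalQueueStealTest exercises runqsteal: for each length i it
// fills p1's run queue, steals into p2, drains both queues, and throws if
// any goroutine is lost or seen twice, or if the number stolen is not
// roughly half of what was queued.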
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

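// RunSchedLocalQueueEmptyTest checks that runqempty never spuriously
// reports the run queue as empty while another goroutine concurrently
// puts to and gets from the same queue.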
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions, and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash

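// MemclrBytes zeroes the contents of b using memclrNoHeapPointers.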
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// GostringW is an entry point for testing gostringw.
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

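// BenchSetType times n calls to heapBitsSetType for the value in x,
// which must be a pointer or a slice.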
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

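// CountPagesInUse returns the runtime's pagesInUse counter alongside an
// independent count obtained by summing the pages of all in-use spans.
// The world is stopped while both values are collected.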
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

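// ProfBuf is an exported version of profBuf for testing the runtime's
// profiling buffer.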
type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

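// blockOnSystemStackInternal prints "x\n" and then blocks forever by
// acquiring the same lock twice.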
func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

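// RWMutex is an exported wrapper around the runtime's rwmutex for testing.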
type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}