github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/proc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"github.com/x04/go/src/fmt"
	"github.com/x04/go/src/math"
	"github.com/x04/go/src/net"
	"github.com/x04/go/src/runtime"
	"github.com/x04/go/src/runtime/debug"
	"github.com/x04/go/src/strings"
	"github.com/x04/go/src/sync"
	"github.com/x04/go/src/sync/atomic"
	"github.com/x04/go/src/syscall"
	"github.com/x04/go/src/testing"
	"github.com/x04/go/src/time"
)

var stop = make(chan bool, 1)

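// perpetuumMobile replaces itself with a fresh goroutine on every pass
// until stop is signaled, so the scheduler always has at least one
// runnable goroutine to contend with during the test.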
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

func TestStopTheWorldDeadlock(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}
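
// Note the GOMAXPROCS save/restore pattern above: runtime.GOMAXPROCS(n)
// installs a new value and returns the previous one. Tests below compress
// the same pattern into a single line,
//
//	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
//
// where the inner call switches to P and the deferred outer call
// reinstates the old value when the test returns.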

func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
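
// The handoff scheme above only completes if every goroutine gets to run:
// goroutine p spins until x reaches P*i+p, then publishes P*i+p+1 so the
// next goroutine can proceed. x therefore walks through 0..3P-1 in order,
// one step per goroutine per round; with GC disabled and the spin loops
// making no function calls, a goroutine that never ran would leave the
// others spinning forever.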

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable the netpoller, which affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices, there are no records for localhost,
				// see https://golang.org/issues/14486.
				// Don't use 127.0.0.1 for every case; it won't work on IPv6-only systems.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
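				// A zero-length read on stdin returns immediately
				// but still passes through syscall entry/exit,
				// presumably to mix syscall transitions into the
				// scheduling load.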
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
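// The 128-word array gives preempt a frame the compiler cannot treat as a
// frameless leaf, so the prologue keeps its stack-bound check; that check
// is where the runtime delivers preemption requests at function calls.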
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestAsyncPreempt(t *testing.T) {
	if !runtime.PreemptMSupported {
		t.Skip("asynchronous preemption not supported on this platform")
	}
	output := runTestProg(t, "testprog", "AsyncPreempt")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}

func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	const factor = 5
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

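// stackGrowthRecursive recurses i levels with a 1KB frame at each level,
// forcing repeated stack growth. The pad[0] == 0 condition is always true;
// referencing pad keeps the compiler from discarding the array.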
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)
	}
}

func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)
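
// For reference, a platform-specific file could assign sysNanosleep along
// these lines (a sketch only, assuming Linux; the real runtime_linux_test.go
// may differ):
//
//	func init() {
//		sysNanosleep = func(d time.Duration) {
//			ts := syscall.NsecToTimespec(d.Nanoseconds())
//			syscall.Nanosleep(&ts, nil) // blocks the thread in a syscall
//		}
//	}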

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no threads on wasm yet")
	}

	// Run the checks on a fresh goroutine and wait for it, so that any
	// t.Errorf calls happen before the test function returns.
	done := make(chan bool)
	go func() {
		defer close(done)
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
	<-done
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

// fakeSyscall emulates a system call.
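// Entersyscall marks the goroutine as inside a system call, so sysmon can
// retake its P if the "call" runs long; Exitsyscall reattaches the goroutine
// to a P (possibly a different one) before it resumes running Go code.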
//go:nosplit
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}

// Check that a goroutine will be preempted if it is calling short system calls.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	iterations := 10
	if testing.Short() {
		iterations = 1
	}
	const (
		maxDuration = 3 * time.Second
		nroutines   = 8
	)

	for i := 0; i < iterations; i++ {
		c := make(chan bool, nroutines)
		stop := uint32(0)

		start := time.Now()
		for g := 0; g < nroutines; g++ {
			go func(stop *uint32) {
				c <- true
				for atomic.LoadUint32(stop) == 0 {
					fakeSyscall(syscallDuration)
				}
				c <- true
			}(&stop)
		}
		// wait until all goroutines have started.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		atomic.StoreUint32(&stop, 1)
		// wait until all goroutines have finished.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		duration := time.Since(start)

		if duration > maxDuration {
			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
		}
	}
}

func TestPreemptionAfterSyscall(t *testing.T) {
	for _, i := range []time.Duration{10, 100, 1000} {
		d := i * time.Microsecond
		t.Run(fmt.Sprint(d), func(t *testing.T) {
			testPreemptionAfterSyscall(t, d)
		})
	}
}

func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}

// TestNetpollBreak tests that netpollBreak can break a netpoll.
// This test is not particularly safe since the call to netpoll
// will pick up any stray files that are ready, but it should work
// OK as long as it is not run in parallel.
func TestNetpollBreak(t *testing.T) {
	if runtime.GOMAXPROCS(0) == 1 {
		t.Skip("skipping: GOMAXPROCS=1")
	}

	// Make sure that netpoll is initialized.
	runtime.NetpollGenericInit()

	start := time.Now()
	c := make(chan bool, 2)
	go func() {
		c <- true
		runtime.Netpoll(10 * time.Second.Nanoseconds())
		c <- true
	}()
	<-c
	// Loop because the break might get eaten by the scheduler.
	// Break twice to break both the netpoll we started and the
	// scheduler netpoll.
loop:
	for {
		runtime.Usleep(100)
		runtime.NetpollBreak()
		runtime.NetpollBreak()
		select {
		case <-c:
			break loop
		default:
		}
	}
	if dur := time.Since(start); dur > 5*time.Second {
		t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
	}
}