github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/proc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math"
	"runtime"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

var stop = make(chan bool, 1)

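// perpetuumMobile spawns a fresh copy of itself in a new goroutine on every
// run until a value arrives on stop, keeping the scheduler supplied with
// runnable goroutines.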
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

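// TestStopTheWorldDeadlock runs concurrent GCs and GOMAXPROCS changes while
// perpetuumMobile floods the scheduler with goroutines, checking that
// stop-the-world operations do not deadlock.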
func TestStopTheWorldDeadlock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

func TestYieldProgress(t *testing.T) {
	testYieldProgress(t, false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(t, true)
}

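// testYieldProgress checks that a goroutine spinning on Gosched (optionally
// from a locked OS thread) does not starve others: the main goroutine must
// still be able to send on c and receive the acknowledgement on cack.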
func testYieldProgress(t *testing.T, locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

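// TestYieldLocked checks that Gosched and Sleep keep working on a goroutine
// that has locked itself to an OS thread and never unlocks it.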
func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

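// TestGoroutineParallelism checks that P goroutines really run in parallel:
// each goroutine busy-waits for its turn on the shared counter x, so the
// round-robin hand-off can only complete if all of them are scheduled at once.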
func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

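// TestBlockLocked checks that a goroutine locked to an OS thread can block on
// a channel repeatedly without wedging that thread.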
func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

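// TestTimerFairness checks that a timer channel fires even while two other
// goroutines constantly keep the send on c ready.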
func TestTimerFairness(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

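// TestTimerFairness2 is a variant of TestTimerFairness in which the busy
// goroutines also enter the syscall path on every iteration; their timers
// must still fire eventually.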
func TestTimerFairness2(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

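// TestGCFairness runs testGCFairnessSource as a separate program (via the
// executeTest helper) and checks that it prints "OK": with GOMAXPROCS=1 and
// two goroutines writing in a tight loop, the main goroutine must still run.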
func TestGCFairness(t *testing.T) {
	output := executeTest(t, testGCFairnessSource, nil)
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

const testGCFairnessSource = `
package main

import (
	"fmt"
	"os"
	"runtime"
	"time"
)

func main() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test tests what it is intended to test only if writes are fast.
		// If there is no /dev/null, we just don't execute the test.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}
`

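// TestPingPongHog checks scheduler fairness on a single P: a pair of
// CPU-hungry "hog" goroutines and a pair of cheap "light" goroutines
// ping-pong on their own channels, and after a fixed interval their
// iteration counts must stay within a factor of two of each other.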
func TestPingPongHog(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 2, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy.
	if hogCount > lightCount*2 || lightCount > hogCount*2 {
		t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

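// BenchmarkPingPongHog measures channel ping-pong latency between two
// goroutines while a third goroutine spins, hogging the single available P.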
func BenchmarkPingPongHog(b *testing.B) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

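// stackGrowthRecursive recurses i levels deep; the otherwise unused pad array
// (kept live by the always-true pad[0] check) makes every frame large enough
// to force stack growth.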
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)
	}
}

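// TestPreemptSplitBig checks that a goroutine can be preempted for GC even
// when its next call needs a stack split: big spins and then calls bigframe,
// whose large frame forces a split in its prologue.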
func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

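// TestSchedLocalQueue and TestSchedLocalQueueSteal exercise the scheduler's
// per-P run queue through test-only entry points exported by package runtime.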
func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

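// benchmarkStackGrowth measures the cost of repeatedly growing the stack by
// recursing to the given depth from goroutines running in parallel.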
func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

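// benchmarkCreateGoroutines measures goroutine creation: it launches procs
// chains of goroutines, each goroutine spawning the next until every chain
// has run b.N/procs goroutines.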
func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

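// matmult adds the product A*B into C over the index ranges [i0,i1) x [j0,j1)
// with inner dimension [k0,k1), recursively splitting the largest dimension
// and computing the halves in parallel (except along k) until the sub-problem
// drops below threshold.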
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}