github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/chan_test.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"internal/testenv"
     9  	"math"
    10  	"runtime"
    11  	"sync"
    12  	"sync/atomic"
    13  	"testing"
    14  	"time"
    15  )
    16  
// TestChan exercises the core channel operations -- blocking and
// non-blocking send/receive, close semantics, FIFO ordering, and
// len/cap -- for every channel capacity in [0, N).
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			// Give the receivers a chance to (incorrectly) complete.
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two pending receivers.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the pending sender.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			// Buffered values are still delivered, in order, after close.
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2 (the two-result receive form).
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			// Merge the per-receiver histograms; every value 0..L-1 must
			// have been seen exactly P times in total.
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}
   202  
   203  func TestNonblockRecvRace(t *testing.T) {
   204  	n := 10000
   205  	if testing.Short() {
   206  		n = 100
   207  	}
   208  	for i := 0; i < n; i++ {
   209  		c := make(chan int, 1)
   210  		c <- 1
   211  		go func() {
   212  			select {
   213  			case <-c:
   214  			default:
   215  				t.Error("chan is not ready")
   216  			}
   217  		}()
   218  		close(c)
   219  		<-c
   220  		if t.Failed() {
   221  			return
   222  		}
   223  	}
   224  }
   225  
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
//
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		// Make c1 ready before the second goroutine exists.
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				// Neither channel ready: the invariant was violated.
				done <- false
				return
			}
			done <- true
		}()
		// Make c2 ready, then try to drain c1 (it may already have been
		// taken by the second goroutine).
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
   270  
   271  // Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
   272  func TestNonblockSelectRace2(t *testing.T) {
   273  	n := 100000
   274  	if testing.Short() {
   275  		n = 1000
   276  	}
   277  	done := make(chan bool, 1)
   278  	for i := 0; i < n; i++ {
   279  		c1 := make(chan int, 1)
   280  		c2 := make(chan int)
   281  		c1 <- 1
   282  		go func() {
   283  			select {
   284  			case <-c1:
   285  			case <-c2:
   286  			default:
   287  				done <- false
   288  				return
   289  			}
   290  			done <- true
   291  		}()
   292  		close(c2)
   293  		select {
   294  		case <-c1:
   295  		default:
   296  		}
   297  		if !<-done {
   298  			t.Fatal("no chan is ready")
   299  		}
   300  	}
   301  }
   302  
// TestSelfSelect runs two goroutines that each select over a send and a
// receive on the same channel, checking that the runtime never matches a
// select's own send with its own receive on an unbuffered channel.
func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // capture a per-iteration copy for the closure
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					// Alternate the case order so both orders are exercised.
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered channel a value can only come
							// from the other goroutine, so receiving our own
							// p means the select matched itself.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}
   341  
// TestSelectStress hammers four channels (two unbuffered, two buffered)
// with dedicated senders/receivers plus one all-channel sending select
// and one all-channel receiving select, checking the whole construct
// terminates without deadlock.
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k // capture a per-iteration copy for the closures
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	// Selecting sender: once a channel has taken its N sends, its case is
	// disabled by nil'ing the channel. Note the case order intentionally
	// differs from the receiving select below.
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	// Selecting receiver: mirror image of the sender above.
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
   436  
// TestSelectFairness checks that a select with two always-ready cases
// (c1 and c2, each pre-filled with trials+1 values) picks between them
// uniformly at random. c3 and c4 are never sent on; they exist only to
// give the select additional, never-ready cases.
func TestSelectFairness(t *testing.T) {
	const trials = 10000
	if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
		testenv.SkipFlaky(t, 22047)
	}
	c1 := make(chan byte, trials+1)
	c2 := make(chan byte, trials+1)
	for i := 0; i < trials+1; i++ {
		c1 <- 1
		c2 <- 2
	}
	c3 := make(chan byte)
	c4 := make(chan byte)
	out := make(chan byte)
	done := make(chan byte)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			var b byte
			select {
			case b = <-c3:
			case b = <-c4:
			case b = <-c1:
			case b = <-c2:
			}
			select {
			case out <- b:
			case <-done:
				return
			}
		}
	}()
	cnt1, cnt2 := 0, 0
	for i := 0; i < trials; i++ {
		switch b := <-out; b {
		case 1:
			cnt1++
		case 2:
			cnt2++
		default:
			t.Fatalf("unexpected value %d on channel", b)
		}
	}
	// If the select in the goroutine is fair,
	// cnt1 and cnt2 should be about the same value.
	// With 10,000 trials, the expected margin of error at
	// a confidence level of six nines is 4.891676 / (2 * Sqrt(10000)).
	r := float64(cnt1) / trials
	e := math.Abs(r - 0.5)
	t.Log(cnt1, cnt2, r, e)
	if e > 4.891676/(2*math.Sqrt(trials)) {
		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
	}
	// Stop the forwarding goroutine and wait for it to exit.
	close(done)
	wg.Wait()
}
   495  
   496  func TestChanSendInterface(t *testing.T) {
   497  	type mt struct{}
   498  	m := &mt{}
   499  	c := make(chan any, 1)
   500  	c <- m
   501  	select {
   502  	case c <- m:
   503  	default:
   504  	}
   505  	select {
   506  	case c <- m:
   507  	case c <- &mt{}:
   508  	default:
   509  	}
   510  }
   511  
   512  func TestPseudoRandomSend(t *testing.T) {
   513  	n := 100
   514  	for _, chanCap := range []int{0, n} {
   515  		c := make(chan int, chanCap)
   516  		l := make([]int, n)
   517  		var m sync.Mutex
   518  		m.Lock()
   519  		go func() {
   520  			for i := 0; i < n; i++ {
   521  				runtime.Gosched()
   522  				l[i] = <-c
   523  			}
   524  			m.Unlock()
   525  		}()
   526  		for i := 0; i < n; i++ {
   527  			select {
   528  			case c <- 1:
   529  			case c <- 0:
   530  			}
   531  		}
   532  		m.Lock() // wait
   533  		n0 := 0
   534  		n1 := 0
   535  		for _, i := range l {
   536  			n0 += (i + 1) % 2
   537  			n1 += i
   538  		}
   539  		if n0 <= n/10 || n1 <= n/10 {
   540  			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
   541  		}
   542  	}
   543  }
   544  
   545  func TestMultiConsumer(t *testing.T) {
   546  	const nwork = 23
   547  	const niter = 271828
   548  
   549  	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}
   550  
   551  	q := make(chan int, nwork*3)
   552  	r := make(chan int, nwork*3)
   553  
   554  	// workers
   555  	var wg sync.WaitGroup
   556  	for i := 0; i < nwork; i++ {
   557  		wg.Add(1)
   558  		go func(w int) {
   559  			for v := range q {
   560  				// mess with the fifo-ish nature of range
   561  				if pn[w%len(pn)] == v {
   562  					runtime.Gosched()
   563  				}
   564  				r <- v
   565  			}
   566  			wg.Done()
   567  		}(i)
   568  	}
   569  
   570  	// feeder & closer
   571  	expect := 0
   572  	go func() {
   573  		for i := 0; i < niter; i++ {
   574  			v := pn[i%len(pn)]
   575  			expect += v
   576  			q <- v
   577  		}
   578  		close(q)  // no more work
   579  		wg.Wait() // workers done
   580  		close(r)  // ... so there can be no more results
   581  	}()
   582  
   583  	// consume & check
   584  	n := 0
   585  	s := 0
   586  	for v := range r {
   587  		n++
   588  		s += v
   589  	}
   590  	if n != niter || s != expect {
   591  		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
   592  			expect, s, niter, n)
   593  	}
   594  }
   595  
// TestShrinkStackDuringBlockedSend checks that a goroutine blocked in a
// channel send survives having its stack shrunk by a GC.
func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}
   627  
// TestNoShrinkStackWhileParking tries to provoke a stack shrink while a
// goroutine is in the middle of parking on a channel (issue 40641).
func TestNoShrinkStackWhileParking(t *testing.T) {
	if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" {
		testenv.SkipFlaky(t, 49382)
	}
	if runtime.GOOS == "openbsd" {
		testenv.SkipFlaky(t, 51482)
	}

	// The goal of this test is to trigger a "racy sudog adjustment"
	// throw. Basically, there's a window between when a goroutine
	// becomes available for preemption for stack scanning (and thus,
	// stack shrinking) but before the goroutine has fully parked on a
	// channel. See issue 40641 for more details on the problem.
	//
	// The way we try to induce this failure is to set up two
	// goroutines: a sender and a receiver that communicate across
	// a channel. We try to set up a situation where the sender
	// grows its stack temporarily then *fully* blocks on a channel
	// often. Meanwhile a GC is triggered so that we try to get a
	// mark worker to shrink the sender's stack and race with the
	// sender parking.
	//
	// Unfortunately the race window here is so small that we
	// either need a ridiculous number of iterations, or we add
	// "usleep(1000)" to park_m, just before the unlockf call.
	const n = 10
	send := func(c chan<- int, done chan struct{}) {
		for i := 0; i < n; i++ {
			c <- i
			// Use lots of stack briefly so that
			// the GC is going to want to shrink us
			// when it scans us. Make sure not to
			// do any function calls otherwise
			// in order to avoid us shrinking ourselves
			// when we're preempted.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}
	recv := func(c <-chan int, done chan struct{}) {
		for i := 0; i < n; i++ {
			// Sleep here so that the sender always
			// fully blocks.
			time.Sleep(10 * time.Microsecond)
			<-c
		}
		done <- struct{}{}
	}
	for i := 0; i < n*20; i++ {
		c := make(chan int)
		done := make(chan struct{})
		go recv(c, done)
		go send(c, done)
		// Wait a little bit before triggering
		// the GC to make sure the sender and
		// receiver have gotten into their groove.
		time.Sleep(50 * time.Microsecond)
		runtime.GC()
		<-done
		<-done
	}
}
   690  
// TestSelectDuplicateChannel verifies that a select may list the same
// channel in multiple cases without corrupting that channel's wait queue
// when the selecting goroutine is woken by a different case.
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
   719  
// TestSelectStackAdjust checks that a pointer into a goroutine's own
// stack, registered as a select receive destination, is adjusted
// correctly when the GC shrinks that stack while the goroutine is
// blocked in the select.
func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		// NOTE(review): assumes each stackGrowthRecursive frame is
		// about 128*8 bytes -- confirm against its definition.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		// When dup is set, list c and d twice in the select below to
		// also exercise duplicate-channel wait queue entries.
		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC to shrink the stacks.
	runtime.GC()

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}
   784  
// struct0 is a zero-size element type for BenchmarkMakeChan/Struct/0.
type struct0 struct{}

// BenchmarkMakeChan measures the cost of allocating a small buffered
// channel for element types of various sizes and pointer-ness.
func BenchmarkMakeChan(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		var x chan byte
		for i := 0; i < b.N; i++ {
			x = make(chan byte, 8)
		}
		close(x)
	})
	b.Run("Int", func(b *testing.B) {
		var x chan int
		for i := 0; i < b.N; i++ {
			x = make(chan int, 8)
		}
		close(x)
	})
	b.Run("Ptr", func(b *testing.B) {
		var x chan *byte
		for i := 0; i < b.N; i++ {
			x = make(chan *byte, 8)
		}
		close(x)
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("0", func(b *testing.B) {
			var x chan struct0
			for i := 0; i < b.N; i++ {
				x = make(chan struct0, 8)
			}
			close(x)
		})
		b.Run("32", func(b *testing.B) {
			var x chan struct32
			for i := 0; i < b.N; i++ {
				x = make(chan struct32, 8)
			}
			close(x)
		})
		b.Run("40", func(b *testing.B) {
			var x chan struct40
			for i := 0; i < b.N; i++ {
				x = make(chan struct40, 8)
			}
			close(x)
		})
	})
}
   833  
// BenchmarkChanNonblocking measures a failed non-blocking receive: myc
// never has a sender, so the default case always runs.
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}
   845  
// BenchmarkSelectUncontended measures a two-case select where exactly
// one case is always ready. The channel pair is local to each parallel
// worker, so there is no cross-goroutine contention; a single token
// ping-pongs between myc1 and myc2.
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
   861  
// BenchmarkSelectSyncContended measures contended selects over shared
// unbuffered channels. Each parallel worker spawns one sender goroutine;
// all senders and workers share myc1..myc3. The senders exit when done
// is closed after the benchmark loop.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}
   889  
// BenchmarkSelectAsyncContended measures contended selects over a shared
// pair of buffered channels. Each parallel worker deposits one token, so
// the number of circulating tokens equals the number of workers.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
   906  
// BenchmarkSelectNonblock measures non-blocking selects that (almost)
// always take the default case: myc1 and myc2 are unbuffered with no
// counterpart, the myc3 receive finds an empty buffer, and the myc4 send
// succeeds only once per run (nothing drains it) before defaulting.
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
   933  
// BenchmarkChanUncontended measures buffered sends and receives on a
// channel private to each parallel worker: fill C values, then drain.
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
   948  
// BenchmarkChanContended is the contended variant of
// BenchmarkChanUncontended: all parallel workers share one buffered
// channel, sized so that every worker can hold its C values at once.
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
   963  
// benchmarkChanSync measures a synchronous ping-pong on an unbuffered
// channel between two goroutines, with `work` iterations of localWork
// between channel operations. The shared counter N hands out chunk
// numbers; the parity of i decides which side of the pair receives
// first, so the two goroutines take complementary roles.
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	// Wait for both goroutines to finish.
	for p := 0; p < procs; p++ {
		<-c
	}
}
   998  
// BenchmarkChanSync measures the bare ping-pong with no local work.
func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

// BenchmarkChanSyncWork adds 1000 iterations of localWork per operation.
func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}
  1006  
// benchmarkChanProdCons measures producer/consumer pairs sharing one
// channel of capacity chanSize, with localWork-style spinning between
// operations. Each producer ends by sending a 0 sentinel; each consumer
// exits at the first 0 it sees, so the procs sentinels terminate the
// procs consumers. The booleans sent on c are ignored; the foo == 42
// comparison only keeps foo (and the spin loops) live.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for all producers and consumers.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
  1048  
// BenchmarkChanProdCons0 uses an unbuffered channel and no local work.
func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

// BenchmarkChanProdCons10 uses a channel of capacity 10, no local work.
func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

// BenchmarkChanProdCons100 uses a channel of capacity 100, no local work.
func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

// BenchmarkChanProdConsWork0 adds 100 units of local work per message.
func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

// BenchmarkChanProdConsWork10 is capacity 10 with 100 units of work.
func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

// BenchmarkChanProdConsWork100 is capacity 100 with 100 units of work.
func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}
  1072  
// BenchmarkSelectProdCons is the select-based variant of
// benchmarkChanProdCons: every channel operation goes through a select
// with two extra cases -- a one-hour timer and myclose, which is never
// sent on or closed -- so those cases exist only to make the select
// wider, not to fire.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			// 0 is the sentinel that stops one consumer.
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for all producers and consumers.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
  1132  
// BenchmarkReceiveDataFromClosedChan measures draining a closed channel:
// range delivers the b.N buffered values and then stops at the close.
func BenchmarkReceiveDataFromClosedChan(b *testing.B) {
	count := b.N
	ch := make(chan struct{}, count)
	for i := 0; i < count; i++ {
		ch <- struct{}{}
	}
	close(ch)

	// Only the drain is timed; the fill above is setup.
	b.ResetTimer()
	for range ch {
	}
}
  1145  
// BenchmarkChanCreation measures allocating a small buffered channel and
// doing one send/receive round trip on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}
  1155  
// BenchmarkChanSem measures using a buffered channel of empty structs as
// a counting semaphore: acquire (send) then release (receive).
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
  1166  
// BenchmarkChanPopular measures sends when many goroutines select over
// one popular shared channel. c never has a sender, so each of the n
// goroutines always completes via its private channel d; the cost of
// interest is their repeated queueing on c.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	// Feed every private channel once per iteration.
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
  1193  
// BenchmarkChanClosed measures a non-blocking receive from a closed
// channel; the receive case is always ready, so default is unreachable.
func BenchmarkChanClosed(b *testing.B) {
	c := make(chan struct{})
	close(c)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-c:
			default:
				b.Error("Unreachable")
			}
		}
	})
}
  1207  
  1208  var (
  1209  	alwaysFalse = false
  1210  	workSink    = 0
  1211  )
  1212  
  1213  func localWork(w int) {
  1214  	foo := 0
  1215  	for i := 0; i < w; i++ {
  1216  		foo /= (foo + 1)
  1217  	}
  1218  	if alwaysFalse {
  1219  		workSink += foo
  1220  	}
  1221  }