github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/runtime/chan_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

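// TestNonblockRecvRace checks that a non-blocking receive racing with close
// never observes the channel as not ready: the select in the goroutine must
// take the receive case, not the default case.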
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}

// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

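// TestChanSendInterface exercises sends of interface values on a buffered
// channel, both directly and through non-blocking and multi-case selects.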
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

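// TestPseudoRandomSend checks that select chooses pseudo-randomly among ready
// send cases: after n sends, both the 0 case and the 1 case must have fired
// a non-trivial fraction of the time.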
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

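// TestMultiConsumer runs many workers ranging over a shared work channel and
// forwarding to a results channel, then checks that every value sent is
// received exactly once (by count and by sum).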
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

var selectSink interface{}

func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	selectSink = nil

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}

type struct0 struct{}

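// BenchmarkMakeChan measures allocation of small buffered channels whose
// element types vary in size and in whether they contain pointers.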
func BenchmarkMakeChan(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		var x chan byte
		for i := 0; i < b.N; i++ {
			x = make(chan byte, 8)
		}
		close(x)
	})
	b.Run("Int", func(b *testing.B) {
		var x chan int
		for i := 0; i < b.N; i++ {
			x = make(chan int, 8)
		}
		close(x)
	})
	b.Run("Ptr", func(b *testing.B) {
		var x chan *byte
		for i := 0; i < b.N; i++ {
			x = make(chan *byte, 8)
		}
		close(x)
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("0", func(b *testing.B) {
			var x chan struct0
			for i := 0; i < b.N; i++ {
				x = make(chan struct0, 8)
			}
			close(x)
		})
		b.Run("32", func(b *testing.B) {
			var x chan struct32
			for i := 0; i < b.N; i++ {
				x = make(chan struct32, 8)
			}
			close(x)
		})
		b.Run("40", func(b *testing.B) {
			var x chan struct40
			for i := 0; i < b.N; i++ {
				x = make(chan struct40, 8)
			}
			close(x)
		})
	})
}

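// BenchmarkChanNonblocking measures a non-blocking receive that always hits
// the default case, executed from many goroutines in parallel.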
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

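// BenchmarkChanContended is like BenchmarkChanUncontended, but all goroutines
// share a single buffered channel, so sends and receives contend.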
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

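// benchmarkChanSync ping-pongs values between two goroutines over an
// unbuffered channel; work controls how much local work is done between
// channel operations.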
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}

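// benchmarkChanProdCons runs GOMAXPROCS producer/consumer goroutine pairs over
// a channel with the given buffer size; localWork models per-item work on both
// sides. A zero value on the channel tells a consumer to stop.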
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

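// BenchmarkSelectProdCons is like benchmarkChanProdCons, but both producers
// and consumers go through a select that also watches a timer and a close
// channel, neither of which is expected to fire during the benchmark.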
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

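// BenchmarkChanPopular has many goroutines selecting on a shared ("popular")
// channel and a private one; the main goroutine wakes each of them through its
// private channel b.N times.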
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}

var (
	alwaysFalse = false
	workSink    = 0
)

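// localWork spins for w iterations to model CPU-bound work; the alwaysFalse
// check keeps the loop's result observable so it is not optimized away.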
func localWork(w int) {
	foo := 0
	for i := 0; i < w; i++ {
		foo /= (foo + 1)
	}
	if alwaysFalse {
		workSink += foo
	}
}