github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/chan_test.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"runtime"
     9  	"sync"
    10  	"sync/atomic"
    11  	"testing"
    12  	"time"
    13  )
    14  
// TestChan exercises the core channel operations — blocking and
// non-blocking send/receive, close semantics, FIFO ordering, and
// len/cap — across a range of buffer capacities (capacity 0 means
// unbuffered).
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			// The sleep gives the receivers a chance to (incorrectly)
			// complete; it cannot prove they block, only catch the
			// case where they don't.
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two parked receivers so they don't leak.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Make room so the blocked sender above can finish.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			// Buffered values are still delivered, in order, after close.
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			// Once drained, a closed chan yields the zero value with ok == false.
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			// Merge the four per-receiver histograms: every value in
			// [0, L) must have been received exactly P times in total.
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}
   200  
   201  func TestNonblockRecvRace(t *testing.T) {
   202  	n := 10000
   203  	if testing.Short() {
   204  		n = 100
   205  	}
   206  	for i := 0; i < n; i++ {
   207  		c := make(chan int, 1)
   208  		c <- 1
   209  		go func() {
   210  			select {
   211  			case <-c:
   212  			default:
   213  				t.Fatal("chan is not ready")
   214  			}
   215  		}()
   216  		close(c)
   217  		<-c
   218  	}
   219  }
   220  
   221  // This test checks that select acts on the state of the channels at one
   222  // moment in the execution, not over a smeared time window.
   223  // In the test, one goroutine does:
   224  //	create c1, c2
   225  //	make c1 ready for receiving
   226  //	create second goroutine
   227  //	make c2 ready for receiving
   228  //	make c1 no longer ready for receiving (if possible)
   229  // The second goroutine does a non-blocking select receiving from c1 and c2.
   230  // From the time the second goroutine is created, at least one of c1 and c2
   231  // is always ready for receiving, so the select in the second goroutine must
   232  // always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			// From the moment this goroutine exists, at least one of
			// c1, c2 is ready for receiving (see comment above the
			// function), so default must never be chosen.
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		// Make c2 ready, then make c1 not-ready again (if the other
		// goroutine hasn't already consumed it).
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
   263  
   264  // Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			// As in TestNonblockSelectRace: at least one of c1, c2 is
			// always ready once this goroutine starts, so the default
			// case must never fire.
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		// close makes c2 permanently ready for receiving.
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
   295  
   296  func TestSelfSelect(t *testing.T) {
   297  	// Ensure that send/recv on the same chan in select
   298  	// does not crash nor deadlock.
   299  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   300  	for _, chanCap := range []int{0, 10} {
   301  		var wg sync.WaitGroup
   302  		wg.Add(2)
   303  		c := make(chan int, chanCap)
   304  		for p := 0; p < 2; p++ {
   305  			p := p
   306  			go func() {
   307  				defer wg.Done()
   308  				for i := 0; i < 1000; i++ {
   309  					if p == 0 || i%2 == 0 {
   310  						select {
   311  						case c <- p:
   312  						case v := <-c:
   313  							if chanCap == 0 && v == p {
   314  								t.Fatalf("self receive")
   315  							}
   316  						}
   317  					} else {
   318  						select {
   319  						case v := <-c:
   320  							if chanCap == 0 && v == p {
   321  								t.Fatalf("self receive")
   322  							}
   323  						case c <- p:
   324  						}
   325  					}
   326  				}
   327  			}()
   328  		}
   329  		wg.Wait()
   330  	}
   331  }
   332  
// TestSelectStress hammers four channels (two unbuffered, two with
// small buffers) with dedicated senders/receivers plus one big
// send-select and one big receive-select, checking that the whole
// construct completes without deadlocking.
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k // capture loop variable for the goroutines below
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c // local copy so finished channels can be nil-ed out
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					// A nil channel case is never selected; this
					// retires the channel once its quota is reached.
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c // local copy so finished channels can be nil-ed out
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
   427  
   428  func TestChanSendInterface(t *testing.T) {
   429  	type mt struct{}
   430  	m := &mt{}
   431  	c := make(chan interface{}, 1)
   432  	c <- m
   433  	select {
   434  	case c <- m:
   435  	default:
   436  	}
   437  	select {
   438  	case c <- m:
   439  	case c <- &mt{}:
   440  	default:
   441  	}
   442  }
   443  
   444  func TestPseudoRandomSend(t *testing.T) {
   445  	n := 100
   446  	for _, chanCap := range []int{0, n} {
   447  		c := make(chan int, chanCap)
   448  		l := make([]int, n)
   449  		var m sync.Mutex
   450  		m.Lock()
   451  		go func() {
   452  			for i := 0; i < n; i++ {
   453  				runtime.Gosched()
   454  				l[i] = <-c
   455  			}
   456  			m.Unlock()
   457  		}()
   458  		for i := 0; i < n; i++ {
   459  			select {
   460  			case c <- 1:
   461  			case c <- 0:
   462  			}
   463  		}
   464  		m.Lock() // wait
   465  		n0 := 0
   466  		n1 := 0
   467  		for _, i := range l {
   468  			n0 += (i + 1) % 2
   469  			n1 += i
   470  		}
   471  		if n0 <= n/10 || n1 <= n/10 {
   472  			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
   473  		}
   474  	}
   475  }
   476  
   477  func TestMultiConsumer(t *testing.T) {
   478  	const nwork = 23
   479  	const niter = 271828
   480  
   481  	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}
   482  
   483  	q := make(chan int, nwork*3)
   484  	r := make(chan int, nwork*3)
   485  
   486  	// workers
   487  	var wg sync.WaitGroup
   488  	for i := 0; i < nwork; i++ {
   489  		wg.Add(1)
   490  		go func(w int) {
   491  			for v := range q {
   492  				// mess with the fifo-ish nature of range
   493  				if pn[w%len(pn)] == v {
   494  					runtime.Gosched()
   495  				}
   496  				r <- v
   497  			}
   498  			wg.Done()
   499  		}(i)
   500  	}
   501  
   502  	// feeder & closer
   503  	expect := 0
   504  	go func() {
   505  		for i := 0; i < niter; i++ {
   506  			v := pn[i%len(pn)]
   507  			expect += v
   508  			q <- v
   509  		}
   510  		close(q)  // no more work
   511  		wg.Wait() // workers done
   512  		close(r)  // ... so there can be no more results
   513  	}()
   514  
   515  	// consume & check
   516  	n := 0
   517  	s := 0
   518  	for v := range r {
   519  		n++
   520  		s += v
   521  	}
   522  	if n != niter || s != expect {
   523  		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
   524  			expect, s, niter, n)
   525  	}
   526  }
   527  
// TestShrinkStackDuringBlockedSend makes sure that channel operations
// still work when a goroutine is blocked on a channel send while its
// stack is shrunk by GC.
// NOTE: this test probably won't fail unless stack1.go:stackDebug
// is set to >= 1.
func TestShrinkStackDuringBlockedSend(t *testing.T) {
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}
   559  
// TestSelectDuplicateChannel makes sure we can queue a G on the same
// channel multiple times (c appears in two cases of one select)
// without corrupting the channel's receiver queue. This is a
// regression test: waking A via d used to corrupt c.recvq.
func TestSelectDuplicateChannel(t *testing.T) {
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A: parked on c (twice) and d simultaneously.
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
   588  
   589  var selectSink interface{}
   590  
// TestSelectStackAdjust tests that channel receive slots that contain
// local stack pointers are adjusted correctly by stack shrinking.
func TestSelectStackAdjust(t *testing.T) {
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		var c2 chan *int
		var d2 chan *int
		if dup {
			// With dup set, c and d each appear twice in the select
			// below, queuing this G on each channel twice.
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times by allocating through selectSink.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	selectSink = nil

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}
   666  
   667  func BenchmarkChanNonblocking(b *testing.B) {
   668  	myc := make(chan int)
   669  	b.RunParallel(func(pb *testing.PB) {
   670  		for pb.Next() {
   671  			select {
   672  			case <-myc:
   673  			default:
   674  			}
   675  		}
   676  	})
   677  }
   678  
   679  func BenchmarkSelectUncontended(b *testing.B) {
   680  	b.RunParallel(func(pb *testing.PB) {
   681  		myc1 := make(chan int, 1)
   682  		myc2 := make(chan int, 1)
   683  		myc1 <- 0
   684  		for pb.Next() {
   685  			select {
   686  			case <-myc1:
   687  				myc2 <- 0
   688  			case <-myc2:
   689  				myc1 <- 0
   690  			}
   691  		}
   692  	})
   693  }
   694  
// BenchmarkSelectSyncContended measures a three-way receive select
// against helper goroutines doing a three-way send select on shared
// unbuffered channels. One sender goroutine is spawned per parallel
// worker; they all exit when done is closed after the benchmark loop.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}
   722  
   723  func BenchmarkSelectAsyncContended(b *testing.B) {
   724  	procs := runtime.GOMAXPROCS(0)
   725  	myc1 := make(chan int, procs)
   726  	myc2 := make(chan int, procs)
   727  	b.RunParallel(func(pb *testing.PB) {
   728  		myc1 <- 0
   729  		for pb.Next() {
   730  			select {
   731  			case <-myc1:
   732  				myc2 <- 0
   733  			case <-myc2:
   734  				myc1 <- 0
   735  			}
   736  		}
   737  	})
   738  }
   739  
   740  func BenchmarkSelectNonblock(b *testing.B) {
   741  	myc1 := make(chan int)
   742  	myc2 := make(chan int)
   743  	myc3 := make(chan int, 1)
   744  	myc4 := make(chan int, 1)
   745  	b.RunParallel(func(pb *testing.PB) {
   746  		for pb.Next() {
   747  			select {
   748  			case <-myc1:
   749  			default:
   750  			}
   751  			select {
   752  			case myc2 <- 0:
   753  			default:
   754  			}
   755  			select {
   756  			case <-myc3:
   757  			default:
   758  			}
   759  			select {
   760  			case myc4 <- 0:
   761  			default:
   762  			}
   763  		}
   764  	})
   765  }
   766  
   767  func BenchmarkChanUncontended(b *testing.B) {
   768  	const C = 100
   769  	b.RunParallel(func(pb *testing.PB) {
   770  		myc := make(chan int, C)
   771  		for pb.Next() {
   772  			for i := 0; i < C; i++ {
   773  				myc <- 0
   774  			}
   775  			for i := 0; i < C; i++ {
   776  				<-myc
   777  			}
   778  		}
   779  	})
   780  }
   781  
   782  func BenchmarkChanContended(b *testing.B) {
   783  	const C = 100
   784  	myc := make(chan int, C*runtime.GOMAXPROCS(0))
   785  	b.RunParallel(func(pb *testing.PB) {
   786  		for pb.Next() {
   787  			for i := 0; i < C; i++ {
   788  				myc <- 0
   789  			}
   790  			for i := 0; i < C; i++ {
   791  				<-myc
   792  			}
   793  		}
   794  	})
   795  }
   796  
// benchmarkChanSync times ping-pong handshakes over an unbuffered
// channel between a pair of goroutines, with `work` units of localWork
// after each channel operation. The shared counter N hands out batches
// of CallsPerSched handshakes; a goroutine holding an even batch
// number receives first, an odd one sends first, so batches pair up.
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				// Claim the next batch; stop when the quota runs out.
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	// Wait for both goroutines to finish all their batches.
	for p := 0; p < procs; p++ {
		<-c
	}
}
   831  
// BenchmarkChanSync measures a bare ping-pong handshake between two
// goroutines over an unbuffered channel.
func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

// BenchmarkChanSyncWork is BenchmarkChanSync with 1000 units of local
// CPU work around each channel operation.
func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}
   839  
// benchmarkChanProdCons times producer/consumer goroutine pairs
// communicating over a channel with the given buffer size, doing
// localWork iterations of cheap arithmetic around each send/receive.
// A value of 0 on myc tells one consumer to stop; c collects
// completion signals. foo stays 0 throughout, so `foo == 42` is always
// false — the comparison only keeps the arithmetic from being
// optimized away.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: claims batches of CallsPerSched sends from N.
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0 // stop sentinel for one consumer
			c <- foo == 42
		}()
		go func() {
			// Consumer: drains myc until it sees a 0 sentinel.
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for every producer and every consumer.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
   881  
// The BenchmarkChanProdCons* variants exercise benchmarkChanProdCons
// with channel buffer sizes 0, 10, and 100 — without local work
// ("ProdCons") and with 100 units of local work per message
// ("ProdConsWork").

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}
   905  
// BenchmarkSelectProdCons is the select-based analog of
// benchmarkChanProdCons: producers and consumers move values over a
// buffered channel, but each operation happens inside a select that
// also carries a timer case and a close case that never fire (the
// timer is an hour out; myclose is never closed or sent to), so the
// measured path is the select with one ready channel case.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0 // stop sentinel for one consumer
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for every producer and every consumer.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
   965  
   966  func BenchmarkChanCreation(b *testing.B) {
   967  	b.RunParallel(func(pb *testing.PB) {
   968  		for pb.Next() {
   969  			myc := make(chan int, 1)
   970  			myc <- 0
   971  			<-myc
   972  		}
   973  	})
   974  }
   975  
   976  func BenchmarkChanSem(b *testing.B) {
   977  	type Empty struct{}
   978  	myc := make(chan Empty, runtime.GOMAXPROCS(0))
   979  	b.RunParallel(func(pb *testing.PB) {
   980  		for pb.Next() {
   981  			myc <- Empty{}
   982  			<-myc
   983  		}
   984  	})
   985  }
   986  
// BenchmarkChanPopular creates n goroutines that all select on one
// popular channel c plus a private channel d, then wakes every
// goroutine b.N times through its private channel. c is never sent to
// or closed, so it accumulates a very long queue of blocked receivers
// — this stresses enqueue/dequeue on a channel with many waiters.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			// Each goroutine completes exactly b.N iterations, each
			// satisfied by the d <- true in the main loop below.
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
  1013  
  1014  var (
  1015  	alwaysFalse = false
  1016  	workSink    = 0
  1017  )
  1018  
  1019  func localWork(w int) {
  1020  	foo := 0
  1021  	for i := 0; i < w; i++ {
  1022  		foo /= (foo + 1)
  1023  	}
  1024  	if alwaysFalse {
  1025  		workSink += foo
  1026  	}
  1027  }