github.com/rafaeltorres324/go/src@v0.0.0-20210519164414-9fdf653a9838/runtime/chan_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"internal/testenv"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
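			// Each of the P senders sends the values 0..L-1, so after
			// draining all P*L values, every value in [0, L) must have
			// been received exactly P times.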
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}

// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash or deadlock.
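	// On an unbuffered chan, a select must also never be matched with
	// itself: a goroutine that receives the value it just offered to
	// send (v == p below) means the runtime paired the send and receive
	// cases of a single select, which would be a bug.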
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

func TestSelectFairness(t *testing.T) {
	const trials = 10000
	if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
		testenv.SkipFlaky(t, 22047)
	}
	c1 := make(chan byte, trials+1)
	c2 := make(chan byte, trials+1)
	for i := 0; i < trials+1; i++ {
		c1 <- 1
		c2 <- 2
	}
	c3 := make(chan byte)
	c4 := make(chan byte)
	out := make(chan byte)
	done := make(chan byte)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			var b byte
			select {
			case b = <-c3:
			case b = <-c4:
			case b = <-c1:
			case b = <-c2:
			}
			select {
			case out <- b:
			case <-done:
				return
			}
		}
	}()
	cnt1, cnt2 := 0, 0
	for i := 0; i < trials; i++ {
		switch b := <-out; b {
		case 1:
			cnt1++
		case 2:
			cnt2++
		default:
			t.Fatalf("unexpected value %d on channel", b)
		}
	}
	// If the select in the goroutine is fair,
	// cnt1 and cnt2 should be about the same value.
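	// Each receive is then an independent, equally likely choice between
	// c1 and c2 (nothing ever sends on c3 or c4), so cnt1 follows a
	// Binomial(trials, 0.5) distribution.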
	// With 10,000 trials, the expected margin of error at
	// a confidence level of six nines is 4.891676 / (2 * Sqrt(10000)).
	r := float64(cnt1) / trials
	e := math.Abs(r - 0.5)
	t.Log(cnt1, cnt2, r, e)
	if e > 4.891676/(2*math.Sqrt(trials)) {
		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
	}
	close(done)
	wg.Wait()
}

func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestNoShrinkStackWhileParking(t *testing.T) {
	// The goal of this test is to trigger a "racy sudog adjustment"
	// throw. Basically, there's a window between when a goroutine
	// becomes available for preemption for stack scanning (and thus,
	// stack shrinking) but before the goroutine has fully parked on a
	// channel. See issue 40641 for more details on the problem.
	//
	// The way we try to induce this failure is to set up two
	// goroutines: a sender and a receiver that communicate across
	// a channel. We try to set up a situation where the sender
	// grows its stack temporarily then *fully* blocks on a channel
	// often. Meanwhile a GC is triggered so that we try to get a
	// mark worker to shrink the sender's stack and race with the
	// sender parking.
	//
	// Unfortunately the race window here is so small that we
	// either need a ridiculous number of iterations, or we add
	// "usleep(1000)" to park_m, just before the unlockf call.
	const n = 10
	send := func(c chan<- int, done chan struct{}) {
		for i := 0; i < n; i++ {
			c <- i
			// Use lots of stack briefly so that
			// the GC is going to want to shrink us
			// when it scans us. Make sure not to
			// do any function calls otherwise
			// in order to avoid us shrinking ourselves
			// when we're preempted.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}
	recv := func(c <-chan int, done chan struct{}) {
		for i := 0; i < n; i++ {
			// Sleep here so that the sender always
			// fully blocks.
			time.Sleep(10 * time.Microsecond)
			<-c
		}
		done <- struct{}{}
	}
	for i := 0; i < n*20; i++ {
		c := make(chan int)
		done := make(chan struct{})
		go recv(c, done)
		go send(c, done)
		// Wait a little bit before triggering
		// the GC to make sure the sender and
		// receiver have gotten into their groove.
		time.Sleep(50 * time.Microsecond)
		runtime.GC()
		<-done
		<-done
	}
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

var selectSink interface{}

func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
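		// If stack shrinking moved this goroutine's stack but failed to
		// adjust the select's receive slot, cx would still hold the old
		// (pre-copy) address of val.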
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
		runtime.Gosched()
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	selectSink = nil

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}

type struct0 struct{}

func BenchmarkMakeChan(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		var x chan byte
		for i := 0; i < b.N; i++ {
			x = make(chan byte, 8)
		}
		close(x)
	})
	b.Run("Int", func(b *testing.B) {
		var x chan int
		for i := 0; i < b.N; i++ {
			x = make(chan int, 8)
		}
		close(x)
	})
	b.Run("Ptr", func(b *testing.B) {
		var x chan *byte
		for i := 0; i < b.N; i++ {
			x = make(chan *byte, 8)
		}
		close(x)
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("0", func(b *testing.B) {
			var x chan struct0
			for i := 0; i < b.N; i++ {
				x = make(chan struct0, 8)
			}
			close(x)
		})
		b.Run("32", func(b *testing.B) {
			var x chan struct32
			for i := 0; i < b.N; i++ {
				x = make(chan struct32, 8)
			}
			close(x)
		})
		b.Run("40", func(b *testing.B) {
			var x chan struct40
			for i := 0; i < b.N; i++ {
				x = make(chan struct40, 8)
			}
			close(x)
		})
	})
}

func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}

func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}

func BenchmarkChanClosed(b *testing.B) {
	c := make(chan struct{})
	close(c)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-c:
			default:
				b.Error("Unreachable")
			}
		}
	})
}

var (
	alwaysFalse = false
	workSink    = 0
)

func localWork(w int) {
	foo := 0
	for i := 0; i < w; i++ {
		foo /= (foo + 1)
	}
	if alwaysFalse {
		workSink += foo
	}
}
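// Note: stackGrowthRecursive, struct32, and struct40 are referenced above
// but not defined in this file; they come from sibling files in package
// runtime_test (stackGrowthRecursive from stack_test.go, and the sized
// structNN payload types from another test file in the package).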