// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"github.com/comwrg/go/src/internal/testenv"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// TestChan exercises the basic channel operations — blocking/non-blocking
// send and receive, close semantics, FIFO ordering, and len/cap — across a
// range of buffer capacities (0..N-1).
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			// Safe to read recv1/recv2 without synchronization here: the
			// goroutines cannot set them until the sends below happen.
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two receiver goroutines.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			// Each value 0..L-1 must have been received exactly P times
			// (once per sending goroutine).
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

// TestNonblockRecvRace checks that a non-blocking receive racing with a
// close does not spuriously report the channel as not ready: c holds a
// buffered value, so the select must always take the receive case.
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}

// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		// Drain c1 if possible, so c2 must be the ready channel.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // capture per-iteration value for the closure
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered chan a goroutine must never
							// receive the value it itself sent.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					// Done with this chan; nil channel disables the case.
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

func TestSelectFairness(t *testing.T) {
	const trials = 10000
	if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
		testenv.SkipFlaky(t, 22047)
	}
	c1 := make(chan byte, trials+1)
	c2 := make(chan byte, trials+1)
	for i := 0; i < trials+1; i++ {
		c1 <- 1
		c2 <- 2
	}
	c3 := make(chan byte)
	c4 := make(chan byte)
	out := make(chan byte)
	done := make(chan byte)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			var b byte
			// c3 and c4 are never ready, so the select always picks
			// between the always-ready c1 and c2.
			select {
			case b = <-c3:
			case b = <-c4:
			case b = <-c1:
			case b = <-c2:
			}
			select {
			case out <- b:
			case <-done:
				return
			}
		}
	}()
	cnt1, cnt2 := 0, 0
	for i := 0; i < trials; i++ {
		switch b := <-out; b {
		case 1:
			cnt1++
		case 2:
			cnt2++
		default:
			t.Fatalf("unexpected value %d on channel", b)
		}
	}
	// If the select in the goroutine is fair,
	// cnt1 and cnt2 should be about the same value.
	// With 10,000 trials, the expected margin of error at
	// a confidence level of six nines is 4.891676 / (2 * Sqrt(10000)).
	r := float64(cnt1) / trials
	e := math.Abs(r - 0.5)
	t.Log(cnt1, cnt2, r, e)
	if e > 4.891676/(2*math.Sqrt(trials)) {
		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
	}
	close(done)
	wg.Wait()
}

// TestChanSendInterface makes sure sends of interface values (including
// inside selects) compile and run without crashing.
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

// TestPseudoRandomSend checks that when several send cases in a select are
// simultaneously ready, the runtime chooses among them pseudorandomly rather
// than always picking the same case.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

// TestMultiConsumer fans work out to nwork consumers over one channel and
// verifies that the total count and sum of values survive the round trip.
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestNoShrinkStackWhileParking(t *testing.T) {
	// The goal of this test is to trigger a "racy sudog adjustment"
	// throw. Basically, there's a window between when a goroutine
	// becomes available for preemption for stack scanning (and thus,
	// stack shrinking) but before the goroutine has fully parked on a
	// channel. See issue 40641 for more details on the problem.
	//
	// The way we try to induce this failure is to set up two
	// goroutines: a sender and a receiver that communicate across
	// a channel. We try to set up a situation where the sender
	// grows its stack temporarily then *fully* blocks on a channel
	// often. Meanwhile a GC is triggered so that we try to get a
	// mark worker to shrink the sender's stack and race with the
	// sender parking.
	//
	// Unfortunately the race window here is so small that we
	// either need a ridiculous number of iterations, or we add
	// "usleep(1000)" to park_m, just before the unlockf call.
	const n = 10
	send := func(c chan<- int, done chan struct{}) {
		for i := 0; i < n; i++ {
			c <- i
			// Use lots of stack briefly so that
			// the GC is going to want to shrink us
			// when it scans us. Make sure not to
			// do any function calls otherwise
			// in order to avoid us shrinking ourselves
			// when we're preempted.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}
	recv := func(c <-chan int, done chan struct{}) {
		for i := 0; i < n; i++ {
			// Sleep here so that the sender always
			// fully blocks.
			time.Sleep(10 * time.Microsecond)
			<-c
		}
		done <- struct{}{}
	}
	for i := 0; i < n*20; i++ {
		c := make(chan int)
		done := make(chan struct{})
		go recv(c, done)
		go send(c, done)
		// Wait a little bit before triggering
		// the GC to make sure the sender and
		// receiver have gotten into their groove.
		time.Sleep(50 * time.Microsecond)
		runtime.GC()
		<-done
		<-done
	}
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC to shrink the stacks.
	runtime.GC()

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}

// struct0 is a zero-size element type used by BenchmarkMakeChan.
type struct0 struct{}

func BenchmarkMakeChan(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		var x chan byte
		for i := 0; i < b.N; i++ {
			x = make(chan byte, 8)
		}
		close(x)
	})
	b.Run("Int", func(b *testing.B) {
		var x chan int
		for i := 0; i < b.N; i++ {
			x = make(chan int, 8)
		}
		close(x)
	})
	b.Run("Ptr", func(b *testing.B) {
		var x chan *byte
		for i := 0; i < b.N; i++ {
			x = make(chan *byte, 8)
		}
		close(x)
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("0", func(b *testing.B) {
			var x chan struct0
			for i := 0; i < b.N; i++ {
				x = make(chan struct0, 8)
			}
			close(x)
		})
		b.Run("32", func(b *testing.B) {
			var x chan struct32
			for i := 0; i < b.N; i++ {
				x = make(chan struct32, 8)
			}
			close(x)
		})
		b.Run("40", func(b *testing.B) {
			var x chan struct40
			for i := 0; i < b.N; i++ {
				x = make(chan struct40, 8)
			}
			close(x)
		})
	})
}

func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

// benchmarkChanSync measures ping-pong synchronization over an unbuffered
// channel between two goroutines, with `work` units of local work per hop.
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}

// benchmarkChanProdCons measures a producer/consumer pipeline over a channel
// of the given size, with `localWork` units of busy work per message.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0 // signal the consumer to stop
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}

func BenchmarkChanClosed(b *testing.B) {
	c := make(chan struct{})
	close(c)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-c:
			default:
				b.Error("Unreachable")
			}
		}
	})
}

var (
	alwaysFalse = false
	workSink    = 0
)

// localWork burns w iterations of integer arithmetic; the alwaysFalse sink
// keeps the compiler from optimizing the loop away.
func localWork(w int) {
	foo := 0
	for i := 0; i < w; i++ {
		foo /= (foo + 1)
	}
	if alwaysFalse {
		workSink += foo
	}
}