// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
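			// Each of the P senders below sends every value in [0, L),
			// so the merged receive counts must contain exactly L distinct
			// values, each seen exactly P times.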
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Fatal("chan is not ready")
			}
		}()
		close(c)
		<-c
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Fatalf("self receive")
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Fatalf("self receive")
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
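	// The two select-based goroutines below retire a case once it has moved
	// its N values by setting that channel to nil; a select case on a nil
	// channel can never proceed, so only the unfinished cases stay eligible.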
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack.c:StackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
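			// stackGrowthRecursive is a test helper defined elsewhere in
			// this package (in the stack tests) that burns stack space via
			// deep recursion.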
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func BenchmarkChanSync(b *testing.B) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						myc <- 0
					} else {
						myc <- 0
						<-myc
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
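
// The following is an illustrative sketch, not part of the original test
// file: it spells out the counting-semaphore pattern that BenchmarkChanSem
// above exercises. A buffered channel's capacity bounds how many goroutines
// hold the semaphore at once. All names here are hypothetical.
func chanSemaphoreSketch() {
	const holders = 3 // at most 3 goroutines proceed concurrently
	sem := make(chan struct{}, holders)
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot (blocks while the buffer is full)
			defer func() { <-sem }() // release the slot
			// bounded work would go here
		}()
	}
	wg.Wait()
}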