github.com/aloncn/graphics-go@v0.0.1/src/runtime/proc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

var stop = make(chan bool, 1)

func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

func TestStopTheWorldDeadlock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

func TestYieldProgress(t *testing.T) {
	testYieldProgress(t, false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(t, true)
}

func testYieldProgress(t *testing.T, locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
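
// An illustrative sketch, not part of the original tests: the handoff
// pattern TestGoroutineParallelism relies on, in miniature. Two goroutines
// take turns bumping an atomic counter, each spinning until the value says
// it is its turn. The spin loops only make progress if both goroutines are
// actually running at the same time (or are asynchronously preempted),
// which is exactly the property the test checks.
func exampleRoundRobinHandoff() {
	var x uint32
	var wg sync.WaitGroup
	for g := uint32(0); g < 2; g++ {
		wg.Add(1)
		go func(g uint32) {
			defer wg.Done()
			for i := uint32(0); i < 3; i++ {
				for atomic.LoadUint32(&x) != 2*i+g { // spin until our turn
				}
				atomic.StoreUint32(&x, 2*i+g+1) // hand off to the other goroutine
			}
		}(g)
	}
	wg.Wait()
}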

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			ln, err := net.Listen("tcp", "localhost:0")
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

func TestTimerFairness(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

func TestTimerFairness2(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}
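
// An illustrative sketch, not part of the original tests: the save/restore
// idiom used throughout this file. runtime.GOMAXPROCS and debug.SetGCPercent
// both return the previous setting, so a single deferred call applies the
// new value immediately and restores the old one when the function returns.
func exampleSaveRestore() {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))  // run with 4 Ps, restore on return
	defer debug.SetGCPercent(debug.SetGCPercent(-1)) // disable GC, restore on return
	// ... body that relies on 4 Ps and no background GC ...
}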

func TestPreemptionGC(t *testing.T) {
	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}
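
// An illustrative sketch, not part of the original tests: how TestNumGoroutine
// above counts goroutines in a stack dump. Each goroutine record produced by
// runtime.Stack(buf, true) begins with the literal "goroutine ", so counting
// that prefix approximates runtime.NumGoroutine at the moment of the dump.
func exampleCountGoroutines() int {
	buf := make([]byte, 1<<20)
	buf = buf[:runtime.Stack(buf, true)]
	return strings.Count(string(buf), "goroutine ")
}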

func TestPingPongHog(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 2, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy.
	if hogCount > lightCount*2 || lightCount > hogCount*2 {
		t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)
	}
}
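
// Note on stackGrowthRecursive above: the 1KiB pad makes every frame large
// enough that each recursive call consumes real stack and exercises the
// runtime's stack-growth path. The pad[0] == 0 condition is always true;
// reading the array presumably keeps the compiler from treating pad as dead
// and shrinking the frame.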

func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		default:
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take the cube root of b.N.
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}
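
// matmult computes C += A*B over the sub-ranges [i0,i1) x [j0,j1) x [k0,k1),
// recursively halving the largest dimension that is at least threshold. The
// i- and j-axis halves touch disjoint cells of C and run in parallel (one
// half in a new goroutine); the k-axis halves accumulate into the same
// C[i][j] cells, so they run serially to avoid a data race. Once all
// dimensions are below threshold, the block is computed directly with a
// triple loop. (Descriptive comment added; the algorithm is unchanged.)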
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}
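
// An illustrative sketch, not part of the original file: a serial reference
// multiply for sanity-checking matmult. matmult computes the same sums
// regardless of threshold, so running both on equal inputs should yield
// identical C matrices. The name matmultNaive is ours, for illustration.
func matmultNaive(A, B, C Matrix, n int) {
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			for k := 0; k < n; k++ {
				C[i][j] += A[i][k] * B[k][j]
			}
		}
	}
}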