github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/proc_test.go (about) 1 // Copyright 2011 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime_test 6 7 import ( 8 "fmt" 9 "internal/race" 10 "internal/testenv" 11 "math" 12 "net" 13 "runtime" 14 "runtime/debug" 15 "strings" 16 "sync" 17 "sync/atomic" 18 "syscall" 19 "testing" 20 "time" 21 ) 22 23 var stop = make(chan bool, 1) 24 25 func perpetuumMobile() { 26 select { 27 case <-stop: 28 default: 29 go perpetuumMobile() 30 } 31 } 32 33 func TestStopTheWorldDeadlock(t *testing.T) { 34 if runtime.GOARCH == "wasm" { 35 t.Skip("no preemption on wasm yet") 36 } 37 if testing.Short() { 38 t.Skip("skipping during short test") 39 } 40 maxprocs := runtime.GOMAXPROCS(3) 41 compl := make(chan bool, 2) 42 go func() { 43 for i := 0; i != 1000; i += 1 { 44 runtime.GC() 45 } 46 compl <- true 47 }() 48 go func() { 49 for i := 0; i != 1000; i += 1 { 50 runtime.GOMAXPROCS(3) 51 } 52 compl <- true 53 }() 54 go perpetuumMobile() 55 <-compl 56 <-compl 57 stop <- true 58 runtime.GOMAXPROCS(maxprocs) 59 } 60 61 func TestYieldProgress(t *testing.T) { 62 testYieldProgress(false) 63 } 64 65 func TestYieldLockedProgress(t *testing.T) { 66 testYieldProgress(true) 67 } 68 69 func testYieldProgress(locked bool) { 70 c := make(chan bool) 71 cack := make(chan bool) 72 go func() { 73 if locked { 74 runtime.LockOSThread() 75 } 76 for { 77 select { 78 case <-c: 79 cack <- true 80 return 81 default: 82 runtime.Gosched() 83 } 84 } 85 }() 86 time.Sleep(10 * time.Millisecond) 87 c <- true 88 <-cack 89 } 90 91 func TestYieldLocked(t *testing.T) { 92 const N = 10 93 c := make(chan bool) 94 go func() { 95 runtime.LockOSThread() 96 for i := 0; i < N; i++ { 97 runtime.Gosched() 98 time.Sleep(time.Millisecond) 99 } 100 c <- true 101 // runtime.UnlockOSThread() is deliberately omitted 102 }() 103 <-c 104 } 105 106 func TestGoroutineParallelism(t *testing.T) { 107 if runtime.NumCPU() == 1 { 108 // Takes too long, too easy to deadlock, etc. 109 t.Skip("skipping on uniprocessor") 110 } 111 P := 4 112 N := 10 113 if testing.Short() { 114 P = 3 115 N = 3 116 } 117 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P)) 118 // If runtime triggers a forced GC during this test then it will deadlock, 119 // since the goroutines can't be stopped/preempted. 120 // Disable GC for this test (see issue #10958). 121 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 122 // SetGCPercent waits until the mark phase is over, but the runtime 123 // also preempts at the start of the sweep phase, so make sure that's 124 // done too. See #45867. 125 runtime.GC() 126 for try := 0; try < N; try++ { 127 done := make(chan bool) 128 x := uint32(0) 129 for p := 0; p < P; p++ { 130 // Test that all P goroutines are scheduled at the same time 131 go func(p int) { 132 for i := 0; i < 3; i++ { 133 expected := uint32(P*i + p) 134 for atomic.LoadUint32(&x) != expected { 135 } 136 atomic.StoreUint32(&x, expected+1) 137 } 138 done <- true 139 }(p) 140 } 141 for p := 0; p < P; p++ { 142 <-done 143 } 144 } 145 } 146 147 // Test that all runnable goroutines are scheduled at the same time. 148 func TestGoroutineParallelism2(t *testing.T) { 149 //testGoroutineParallelism2(t, false, false) 150 testGoroutineParallelism2(t, true, false) 151 testGoroutineParallelism2(t, false, true) 152 testGoroutineParallelism2(t, true, true) 153 } 154 155 func testGoroutineParallelism2(t *testing.T, load, netpoll bool) { 156 if runtime.NumCPU() == 1 { 157 // Takes too long, too easy to deadlock, etc. 158 t.Skip("skipping on uniprocessor") 159 } 160 P := 4 161 N := 10 162 if testing.Short() { 163 N = 3 164 } 165 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P)) 166 // If runtime triggers a forced GC during this test then it will deadlock, 167 // since the goroutines can't be stopped/preempted. 168 // Disable GC for this test (see issue #10958). 169 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 170 // SetGCPercent waits until the mark phase is over, but the runtime 171 // also preempts at the start of the sweep phase, so make sure that's 172 // done too. See #45867. 173 runtime.GC() 174 for try := 0; try < N; try++ { 175 if load { 176 // Create P goroutines and wait until they all run. 177 // When we run the actual test below, worker threads 178 // running the goroutines will start parking. 179 done := make(chan bool) 180 x := uint32(0) 181 for p := 0; p < P; p++ { 182 go func() { 183 if atomic.AddUint32(&x, 1) == uint32(P) { 184 done <- true 185 return 186 } 187 for atomic.LoadUint32(&x) != uint32(P) { 188 } 189 }() 190 } 191 <-done 192 } 193 if netpoll { 194 // Enable netpoller, affects schedler behavior. 195 laddr := "localhost:0" 196 if runtime.GOOS == "android" { 197 // On some Android devices, there are no records for localhost, 198 // see https://golang.org/issues/14486. 199 // Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems. 200 laddr = "127.0.0.1:0" 201 } 202 ln, err := net.Listen("tcp", laddr) 203 if err != nil { 204 defer ln.Close() // yup, defer in a loop 205 } 206 } 207 done := make(chan bool) 208 x := uint32(0) 209 // Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism. 210 for p := 0; p < P/2; p++ { 211 go func(p int) { 212 for p2 := 0; p2 < 2; p2++ { 213 go func(p2 int) { 214 for i := 0; i < 3; i++ { 215 expected := uint32(P*i + p*2 + p2) 216 for atomic.LoadUint32(&x) != expected { 217 } 218 atomic.StoreUint32(&x, expected+1) 219 } 220 done <- true 221 }(p2) 222 } 223 }(p) 224 } 225 for p := 0; p < P; p++ { 226 <-done 227 } 228 } 229 } 230 231 func TestBlockLocked(t *testing.T) { 232 const N = 10 233 c := make(chan bool) 234 go func() { 235 runtime.LockOSThread() 236 for i := 0; i < N; i++ { 237 c <- true 238 } 239 runtime.UnlockOSThread() 240 }() 241 for i := 0; i < N; i++ { 242 <-c 243 } 244 } 245 246 func TestTimerFairness(t *testing.T) { 247 if runtime.GOARCH == "wasm" { 248 t.Skip("no preemption on wasm yet") 249 } 250 251 done := make(chan bool) 252 c := make(chan bool) 253 for i := 0; i < 2; i++ { 254 go func() { 255 for { 256 select { 257 case c <- true: 258 case <-done: 259 return 260 } 261 } 262 }() 263 } 264 265 timer := time.After(20 * time.Millisecond) 266 for { 267 select { 268 case <-c: 269 case <-timer: 270 close(done) 271 return 272 } 273 } 274 } 275 276 func TestTimerFairness2(t *testing.T) { 277 if runtime.GOARCH == "wasm" { 278 t.Skip("no preemption on wasm yet") 279 } 280 281 done := make(chan bool) 282 c := make(chan bool) 283 for i := 0; i < 2; i++ { 284 go func() { 285 timer := time.After(20 * time.Millisecond) 286 var buf [1]byte 287 for { 288 syscall.Read(0, buf[0:0]) 289 select { 290 case c <- true: 291 case <-c: 292 case <-timer: 293 done <- true 294 return 295 } 296 } 297 }() 298 } 299 <-done 300 <-done 301 } 302 303 // The function is used to test preemption at split stack checks. 304 // Declaring a var avoids inlining at the call site. 305 var preempt = func() int { 306 var a [128]int 307 sum := 0 308 for _, v := range a { 309 sum += v 310 } 311 return sum 312 } 313 314 func TestPreemption(t *testing.T) { 315 if runtime.GOARCH == "wasm" { 316 t.Skip("no preemption on wasm yet") 317 } 318 319 // Test that goroutines are preempted at function calls. 320 N := 5 321 if testing.Short() { 322 N = 2 323 } 324 c := make(chan bool) 325 var x uint32 326 for g := 0; g < 2; g++ { 327 go func(g int) { 328 for i := 0; i < N; i++ { 329 for atomic.LoadUint32(&x) != uint32(g) { 330 preempt() 331 } 332 atomic.StoreUint32(&x, uint32(1-g)) 333 } 334 c <- true 335 }(g) 336 } 337 <-c 338 <-c 339 } 340 341 func TestPreemptionGC(t *testing.T) { 342 if runtime.GOARCH == "wasm" { 343 t.Skip("no preemption on wasm yet") 344 } 345 346 // Test that pending GC preempts running goroutines. 347 P := 5 348 N := 10 349 if testing.Short() { 350 P = 3 351 N = 2 352 } 353 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1)) 354 var stop uint32 355 for i := 0; i < P; i++ { 356 go func() { 357 for atomic.LoadUint32(&stop) == 0 { 358 preempt() 359 } 360 }() 361 } 362 for i := 0; i < N; i++ { 363 runtime.Gosched() 364 runtime.GC() 365 } 366 atomic.StoreUint32(&stop, 1) 367 } 368 369 func TestAsyncPreempt(t *testing.T) { 370 if !runtime.PreemptMSupported { 371 t.Skip("asynchronous preemption not supported on this platform") 372 } 373 output := runTestProg(t, "testprog", "AsyncPreempt") 374 want := "OK\n" 375 if output != want { 376 t.Fatalf("want %s, got %s\n", want, output) 377 } 378 } 379 380 func TestGCFairness(t *testing.T) { 381 output := runTestProg(t, "testprog", "GCFairness") 382 want := "OK\n" 383 if output != want { 384 t.Fatalf("want %s, got %s\n", want, output) 385 } 386 } 387 388 func TestGCFairness2(t *testing.T) { 389 output := runTestProg(t, "testprog", "GCFairness2") 390 want := "OK\n" 391 if output != want { 392 t.Fatalf("want %s, got %s\n", want, output) 393 } 394 } 395 396 func TestNumGoroutine(t *testing.T) { 397 output := runTestProg(t, "testprog", "NumGoroutine") 398 want := "1\n" 399 if output != want { 400 t.Fatalf("want %q, got %q", want, output) 401 } 402 403 buf := make([]byte, 1<<20) 404 405 // Try up to 10 times for a match before giving up. 406 // This is a fundamentally racy check but it's important 407 // to notice if NumGoroutine and Stack are _always_ out of sync. 408 for i := 0; ; i++ { 409 // Give goroutines about to exit a chance to exit. 410 // The NumGoroutine and Stack below need to see 411 // the same state of the world, so anything we can do 412 // to keep it quiet is good. 413 runtime.Gosched() 414 415 n := runtime.NumGoroutine() 416 buf = buf[:runtime.Stack(buf, true)] 417 418 nstk := strings.Count(string(buf), "goroutine ") 419 if n == nstk { 420 break 421 } 422 if i >= 10 { 423 t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf) 424 } 425 } 426 } 427 428 func TestPingPongHog(t *testing.T) { 429 if runtime.GOARCH == "wasm" { 430 t.Skip("no preemption on wasm yet") 431 } 432 if testing.Short() { 433 t.Skip("skipping in -short mode") 434 } 435 if race.Enabled { 436 // The race detector randomizes the scheduler, 437 // which causes this test to fail (#38266). 438 t.Skip("skipping in -race mode") 439 } 440 441 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) 442 done := make(chan bool) 443 hogChan, lightChan := make(chan bool), make(chan bool) 444 hogCount, lightCount := 0, 0 445 446 run := func(limit int, counter *int, wake chan bool) { 447 for { 448 select { 449 case <-done: 450 return 451 452 case <-wake: 453 for i := 0; i < limit; i++ { 454 *counter++ 455 } 456 wake <- true 457 } 458 } 459 } 460 461 // Start two co-scheduled hog goroutines. 462 for i := 0; i < 2; i++ { 463 go run(1e6, &hogCount, hogChan) 464 } 465 466 // Start two co-scheduled light goroutines. 467 for i := 0; i < 2; i++ { 468 go run(1e3, &lightCount, lightChan) 469 } 470 471 // Start goroutine pairs and wait for a few preemption rounds. 472 hogChan <- true 473 lightChan <- true 474 time.Sleep(100 * time.Millisecond) 475 close(done) 476 <-hogChan 477 <-lightChan 478 479 // Check that hogCount and lightCount are within a factor of 480 // 20, which indicates that both pairs of goroutines handed off 481 // the P within a time-slice to their buddy. We can use a 482 // fairly large factor here to make this robust: if the 483 // scheduler isn't working right, the gap should be ~1000X 484 // (was 5, increased to 20, see issue 52207). 485 const factor = 20 486 if hogCount/factor > lightCount || lightCount/factor > hogCount { 487 t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount)) 488 } 489 } 490 491 func BenchmarkPingPongHog(b *testing.B) { 492 if b.N == 0 { 493 return 494 } 495 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) 496 497 // Create a CPU hog 498 stop, done := make(chan bool), make(chan bool) 499 go func() { 500 for { 501 select { 502 case <-stop: 503 done <- true 504 return 505 default: 506 } 507 } 508 }() 509 510 // Ping-pong b.N times 511 ping, pong := make(chan bool), make(chan bool) 512 go func() { 513 for j := 0; j < b.N; j++ { 514 pong <- <-ping 515 } 516 close(stop) 517 done <- true 518 }() 519 go func() { 520 for i := 0; i < b.N; i++ { 521 ping <- <-pong 522 } 523 done <- true 524 }() 525 b.ResetTimer() 526 ping <- true // Start ping-pong 527 <-stop 528 b.StopTimer() 529 <-ping // Let last ponger exit 530 <-done // Make sure goroutines exit 531 <-done 532 <-done 533 } 534 535 var padData [128]uint64 536 537 func stackGrowthRecursive(i int) { 538 var pad [128]uint64 539 pad = padData 540 for j := range pad { 541 if pad[j] != 0 { 542 return 543 } 544 } 545 if i != 0 { 546 stackGrowthRecursive(i - 1) 547 } 548 } 549 550 func TestPreemptSplitBig(t *testing.T) { 551 if testing.Short() { 552 t.Skip("skipping in -short mode") 553 } 554 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) 555 stop := make(chan int) 556 go big(stop) 557 for i := 0; i < 3; i++ { 558 time.Sleep(10 * time.Microsecond) // let big start running 559 runtime.GC() 560 } 561 close(stop) 562 } 563 564 func big(stop chan int) int { 565 n := 0 566 for { 567 // delay so that gc is sure to have asked for a preemption 568 for i := 0; i < 1e9; i++ { 569 n++ 570 } 571 572 // call bigframe, which used to miss the preemption in its prologue. 573 bigframe(stop) 574 575 // check if we've been asked to stop. 576 select { 577 case <-stop: 578 return n 579 } 580 } 581 } 582 583 func bigframe(stop chan int) int { 584 // not splitting the stack will overflow. 585 // small will notice that it needs a stack split and will 586 // catch the overflow. 587 var x [8192]byte 588 return small(stop, &x) 589 } 590 591 func small(stop chan int, x *[8192]byte) int { 592 for i := range x { 593 x[i] = byte(i) 594 } 595 sum := 0 596 for i := range x { 597 sum += int(x[i]) 598 } 599 600 // keep small from being a leaf function, which might 601 // make it not do any stack check at all. 602 nonleaf(stop) 603 604 return sum 605 } 606 607 func nonleaf(stop chan int) bool { 608 // do something that won't be inlined: 609 select { 610 case <-stop: 611 return true 612 default: 613 return false 614 } 615 } 616 617 func TestSchedLocalQueue(t *testing.T) { 618 runtime.RunSchedLocalQueueTest() 619 } 620 621 func TestSchedLocalQueueSteal(t *testing.T) { 622 runtime.RunSchedLocalQueueStealTest() 623 } 624 625 func TestSchedLocalQueueEmpty(t *testing.T) { 626 if runtime.NumCPU() == 1 { 627 // Takes too long and does not trigger the race. 628 t.Skip("skipping on uniprocessor") 629 } 630 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) 631 632 // If runtime triggers a forced GC during this test then it will deadlock, 633 // since the goroutines can't be stopped/preempted during spin wait. 634 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 635 // SetGCPercent waits until the mark phase is over, but the runtime 636 // also preempts at the start of the sweep phase, so make sure that's 637 // done too. See #45867. 638 runtime.GC() 639 640 iters := int(1e5) 641 if testing.Short() { 642 iters = 1e2 643 } 644 runtime.RunSchedLocalQueueEmptyTest(iters) 645 } 646 647 func benchmarkStackGrowth(b *testing.B, rec int) { 648 b.RunParallel(func(pb *testing.PB) { 649 for pb.Next() { 650 stackGrowthRecursive(rec) 651 } 652 }) 653 } 654 655 func BenchmarkStackGrowth(b *testing.B) { 656 benchmarkStackGrowth(b, 10) 657 } 658 659 func BenchmarkStackGrowthDeep(b *testing.B) { 660 benchmarkStackGrowth(b, 1024) 661 } 662 663 func BenchmarkCreateGoroutines(b *testing.B) { 664 benchmarkCreateGoroutines(b, 1) 665 } 666 667 func BenchmarkCreateGoroutinesParallel(b *testing.B) { 668 benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1)) 669 } 670 671 func benchmarkCreateGoroutines(b *testing.B, procs int) { 672 c := make(chan bool) 673 var f func(n int) 674 f = func(n int) { 675 if n == 0 { 676 c <- true 677 return 678 } 679 go f(n - 1) 680 } 681 for i := 0; i < procs; i++ { 682 go f(b.N / procs) 683 } 684 for i := 0; i < procs; i++ { 685 <-c 686 } 687 } 688 689 func BenchmarkCreateGoroutinesCapture(b *testing.B) { 690 b.ReportAllocs() 691 for i := 0; i < b.N; i++ { 692 const N = 4 693 var wg sync.WaitGroup 694 wg.Add(N) 695 for i := 0; i < N; i++ { 696 i := i 697 go func() { 698 if i >= N { 699 b.Logf("bad") // just to capture b 700 } 701 wg.Done() 702 }() 703 } 704 wg.Wait() 705 } 706 } 707 708 // warmupScheduler ensures the scheduler has at least targetThreadCount threads 709 // in its thread pool. 710 func warmupScheduler(targetThreadCount int) { 711 var wg sync.WaitGroup 712 var count int32 713 for i := 0; i < targetThreadCount; i++ { 714 wg.Add(1) 715 go func() { 716 atomic.AddInt32(&count, 1) 717 for atomic.LoadInt32(&count) < int32(targetThreadCount) { 718 // spin until all threads started 719 } 720 721 // spin a bit more to ensure they are all running on separate CPUs. 722 doWork(time.Millisecond) 723 wg.Done() 724 }() 725 } 726 wg.Wait() 727 } 728 729 func doWork(dur time.Duration) { 730 start := time.Now() 731 for time.Since(start) < dur { 732 } 733 } 734 735 // BenchmarkCreateGoroutinesSingle creates many goroutines, all from a single 736 // producer (the main benchmark goroutine). 737 // 738 // Compared to BenchmarkCreateGoroutines, this causes different behavior in the 739 // scheduler because Ms are much more likely to need to steal work from the 740 // main P rather than having work in the local run queue. 741 func BenchmarkCreateGoroutinesSingle(b *testing.B) { 742 // Since we are interested in stealing behavior, warm the scheduler to 743 // get all the Ps running first. 744 warmupScheduler(runtime.GOMAXPROCS(0)) 745 b.ResetTimer() 746 747 var wg sync.WaitGroup 748 wg.Add(b.N) 749 for i := 0; i < b.N; i++ { 750 go func() { 751 wg.Done() 752 }() 753 } 754 wg.Wait() 755 } 756 757 func BenchmarkClosureCall(b *testing.B) { 758 sum := 0 759 off1 := 1 760 for i := 0; i < b.N; i++ { 761 off2 := 2 762 func() { 763 sum += i + off1 + off2 764 }() 765 } 766 _ = sum 767 } 768 769 func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) { 770 if runtime.GOMAXPROCS(0) == 1 { 771 b.Skip("skipping: GOMAXPROCS=1") 772 } 773 774 wakeDelay := 5 * time.Microsecond 775 for _, delay := range []time.Duration{ 776 0, 777 1 * time.Microsecond, 778 2 * time.Microsecond, 779 5 * time.Microsecond, 780 10 * time.Microsecond, 781 20 * time.Microsecond, 782 50 * time.Microsecond, 783 100 * time.Microsecond, 784 } { 785 b.Run(delay.String(), func(b *testing.B) { 786 if b.N == 0 { 787 return 788 } 789 // Start two goroutines, which alternate between being 790 // sender and receiver in the following protocol: 791 // 792 // - The receiver spins for `delay` and then does a 793 // blocking receive on a channel. 794 // 795 // - The sender spins for `delay+wakeDelay` and then 796 // sends to the same channel. (The addition of 797 // `wakeDelay` improves the probability that the 798 // receiver will be blocking when the send occurs when 799 // the goroutines execute in parallel.) 800 // 801 // In each iteration of the benchmark, each goroutine 802 // acts once as sender and once as receiver, so each 803 // goroutine spins for delay twice. 804 // 805 // BenchmarkWakeupParallel is used to estimate how 806 // efficiently the scheduler parallelizes goroutines in 807 // the presence of blocking: 808 // 809 // - If both goroutines are executed on the same core, 810 // an increase in delay by N will increase the time per 811 // iteration by 4*N, because all 4 delays are 812 // serialized. 813 // 814 // - Otherwise, an increase in delay by N will increase 815 // the time per iteration by 2*N, and the time per 816 // iteration is 2 * (runtime overhead + chan 817 // send/receive pair + delay + wakeDelay). This allows 818 // the runtime overhead, including the time it takes 819 // for the unblocked goroutine to be scheduled, to be 820 // estimated. 821 ping, pong := make(chan struct{}), make(chan struct{}) 822 start := make(chan struct{}) 823 done := make(chan struct{}) 824 go func() { 825 <-start 826 for i := 0; i < b.N; i++ { 827 // sender 828 spin(delay + wakeDelay) 829 ping <- struct{}{} 830 // receiver 831 spin(delay) 832 <-pong 833 } 834 done <- struct{}{} 835 }() 836 go func() { 837 for i := 0; i < b.N; i++ { 838 // receiver 839 spin(delay) 840 <-ping 841 // sender 842 spin(delay + wakeDelay) 843 pong <- struct{}{} 844 } 845 done <- struct{}{} 846 }() 847 b.ResetTimer() 848 start <- struct{}{} 849 <-done 850 <-done 851 }) 852 } 853 } 854 855 func BenchmarkWakeupParallelSpinning(b *testing.B) { 856 benchmarkWakeupParallel(b, func(d time.Duration) { 857 end := time.Now().Add(d) 858 for time.Now().Before(end) { 859 // do nothing 860 } 861 }) 862 } 863 864 // sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go) 865 // to sleep for the given duration. If nil, dependent tests are skipped. 866 // The implementation should invoke a blocking system call and not 867 // call time.Sleep, which would deschedule the goroutine. 868 var sysNanosleep func(d time.Duration) 869 870 func BenchmarkWakeupParallelSyscall(b *testing.B) { 871 if sysNanosleep == nil { 872 b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS) 873 } 874 benchmarkWakeupParallel(b, func(d time.Duration) { 875 sysNanosleep(d) 876 }) 877 } 878 879 type Matrix [][]float64 880 881 func BenchmarkMatmult(b *testing.B) { 882 b.StopTimer() 883 // matmult is O(N**3) but testing expects O(b.N), 884 // so we need to take cube root of b.N 885 n := int(math.Cbrt(float64(b.N))) + 1 886 A := makeMatrix(n) 887 B := makeMatrix(n) 888 C := makeMatrix(n) 889 b.StartTimer() 890 matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8) 891 } 892 893 func makeMatrix(n int) Matrix { 894 m := make(Matrix, n) 895 for i := 0; i < n; i++ { 896 m[i] = make([]float64, n) 897 for j := 0; j < n; j++ { 898 m[i][j] = float64(i*n + j) 899 } 900 } 901 return m 902 } 903 904 func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) { 905 di := i1 - i0 906 dj := j1 - j0 907 dk := k1 - k0 908 if di >= dj && di >= dk && di >= threshold { 909 // divide in two by y axis 910 mi := i0 + di/2 911 done1 := make(chan struct{}, 1) 912 go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold) 913 matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold) 914 <-done1 915 } else if dj >= dk && dj >= threshold { 916 // divide in two by x axis 917 mj := j0 + dj/2 918 done1 := make(chan struct{}, 1) 919 go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold) 920 matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold) 921 <-done1 922 } else if dk >= threshold { 923 // divide in two by "k" axis 924 // deliberately not parallel because of data races 925 mk := k0 + dk/2 926 matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold) 927 matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold) 928 } else { 929 // the matrices are small enough, compute directly 930 for i := i0; i < i1; i++ { 931 for j := j0; j < j1; j++ { 932 for k := k0; k < k1; k++ { 933 C[i][j] += A[i][k] * B[k][j] 934 } 935 } 936 } 937 } 938 if done != nil { 939 done <- struct{}{} 940 } 941 } 942 943 func TestStealOrder(t *testing.T) { 944 runtime.RunStealOrderTest() 945 } 946 947 func TestLockOSThreadNesting(t *testing.T) { 948 if runtime.GOARCH == "wasm" { 949 t.Skip("no threads on wasm yet") 950 } 951 952 go func() { 953 e, i := runtime.LockOSCounts() 954 if e != 0 || i != 0 { 955 t.Errorf("want locked counts 0, 0; got %d, %d", e, i) 956 return 957 } 958 runtime.LockOSThread() 959 runtime.LockOSThread() 960 runtime.UnlockOSThread() 961 e, i = runtime.LockOSCounts() 962 if e != 1 || i != 0 { 963 t.Errorf("want locked counts 1, 0; got %d, %d", e, i) 964 return 965 } 966 runtime.UnlockOSThread() 967 e, i = runtime.LockOSCounts() 968 if e != 0 || i != 0 { 969 t.Errorf("want locked counts 0, 0; got %d, %d", e, i) 970 return 971 } 972 }() 973 } 974 975 func TestLockOSThreadExit(t *testing.T) { 976 testLockOSThreadExit(t, "testprog") 977 } 978 979 func testLockOSThreadExit(t *testing.T, prog string) { 980 output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1") 981 want := "OK\n" 982 if output != want { 983 t.Errorf("want %q, got %q", want, output) 984 } 985 986 output = runTestProg(t, prog, "LockOSThreadAlt") 987 if output != want { 988 t.Errorf("want %q, got %q", want, output) 989 } 990 } 991 992 func TestLockOSThreadAvoidsStatePropagation(t *testing.T) { 993 want := "OK\n" 994 skip := "unshare not permitted\n" 995 output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1") 996 if output == skip { 997 t.Skip("unshare syscall not permitted on this system") 998 } else if output != want { 999 t.Errorf("want %q, got %q", want, output) 1000 } 1001 } 1002 1003 func TestLockOSThreadTemplateThreadRace(t *testing.T) { 1004 testenv.MustHaveGoRun(t) 1005 1006 exe, err := buildTestProg(t, "testprog") 1007 if err != nil { 1008 t.Fatal(err) 1009 } 1010 1011 iterations := 100 1012 if testing.Short() { 1013 // Reduce run time to ~100ms, with much lower probability of 1014 // catching issues. 1015 iterations = 5 1016 } 1017 for i := 0; i < iterations; i++ { 1018 want := "OK\n" 1019 output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace") 1020 if output != want { 1021 t.Fatalf("run %d: want %q, got %q", i, want, output) 1022 } 1023 } 1024 } 1025 1026 // fakeSyscall emulates a system call. 1027 // 1028 //go:nosplit 1029 func fakeSyscall(duration time.Duration) { 1030 runtime.Entersyscall() 1031 for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); { 1032 } 1033 runtime.Exitsyscall() 1034 } 1035 1036 // Check that a goroutine will be preempted if it is calling short system calls. 1037 func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) { 1038 if runtime.GOARCH == "wasm" { 1039 t.Skip("no preemption on wasm yet") 1040 } 1041 1042 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) 1043 1044 interations := 10 1045 if testing.Short() { 1046 interations = 1 1047 } 1048 const ( 1049 maxDuration = 5 * time.Second 1050 nroutines = 8 1051 ) 1052 1053 for i := 0; i < interations; i++ { 1054 c := make(chan bool, nroutines) 1055 stop := uint32(0) 1056 1057 start := time.Now() 1058 for g := 0; g < nroutines; g++ { 1059 go func(stop *uint32) { 1060 c <- true 1061 for atomic.LoadUint32(stop) == 0 { 1062 fakeSyscall(syscallDuration) 1063 } 1064 c <- true 1065 }(&stop) 1066 } 1067 // wait until all goroutines have started. 1068 for g := 0; g < nroutines; g++ { 1069 <-c 1070 } 1071 atomic.StoreUint32(&stop, 1) 1072 // wait until all goroutines have finished. 1073 for g := 0; g < nroutines; g++ { 1074 <-c 1075 } 1076 duration := time.Since(start) 1077 1078 if duration > maxDuration { 1079 t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration) 1080 } 1081 } 1082 } 1083 1084 func TestPreemptionAfterSyscall(t *testing.T) { 1085 if runtime.GOOS == "plan9" { 1086 testenv.SkipFlaky(t, 41015) 1087 } 1088 1089 for _, i := range []time.Duration{10, 100, 1000} { 1090 d := i * time.Microsecond 1091 t.Run(fmt.Sprint(d), func(t *testing.T) { 1092 testPreemptionAfterSyscall(t, d) 1093 }) 1094 } 1095 } 1096 1097 func TestGetgThreadSwitch(t *testing.T) { 1098 runtime.RunGetgThreadSwitchTest() 1099 } 1100 1101 // TestNetpollBreak tests that netpollBreak can break a netpoll. 1102 // This test is not particularly safe since the call to netpoll 1103 // will pick up any stray files that are ready, but it should work 1104 // OK as long it is not run in parallel. 1105 func TestNetpollBreak(t *testing.T) { 1106 if runtime.GOMAXPROCS(0) == 1 { 1107 t.Skip("skipping: GOMAXPROCS=1") 1108 } 1109 1110 // Make sure that netpoll is initialized. 1111 runtime.NetpollGenericInit() 1112 1113 start := time.Now() 1114 c := make(chan bool, 2) 1115 go func() { 1116 c <- true 1117 runtime.Netpoll(10 * time.Second.Nanoseconds()) 1118 c <- true 1119 }() 1120 <-c 1121 // Loop because the break might get eaten by the scheduler. 1122 // Break twice to break both the netpoll we started and the 1123 // scheduler netpoll. 1124 loop: 1125 for { 1126 runtime.Usleep(100) 1127 runtime.NetpollBreak() 1128 runtime.NetpollBreak() 1129 select { 1130 case <-c: 1131 break loop 1132 default: 1133 } 1134 } 1135 if dur := time.Since(start); dur > 5*time.Second { 1136 t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur) 1137 } 1138 } 1139 1140 // TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value 1141 // doesn't cause a crash at startup. See issue 38474. 1142 func TestBigGOMAXPROCS(t *testing.T) { 1143 t.Parallel() 1144 output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024") 1145 // Ignore error conditions on small machines. 1146 for _, errstr := range []string{ 1147 "failed to create new OS thread", 1148 "cannot allocate memory", 1149 } { 1150 if strings.Contains(output, errstr) { 1151 t.Skipf("failed to create 1024 threads") 1152 } 1153 } 1154 if !strings.Contains(output, "unknown function: NonexistentTest") { 1155 t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output) 1156 } 1157 }