github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/proc_test.go (about) 1 // Copyright 2011 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime_test 6 7 import ( 8 "fmt" 9 "internal/race" 10 "internal/testenv" 11 "math" 12 "net" 13 "runtime" 14 "runtime/debug" 15 "strings" 16 "sync" 17 "sync/atomic" 18 "syscall" 19 "testing" 20 "time" 21 ) 22 23 var stop = make(chan bool, 1) 24 25 func perpetuumMobile() { 26 select { 27 case <-stop: 28 default: 29 go perpetuumMobile() 30 } 31 } 32 33 func TestStopTheWorldDeadlock(t *testing.T) { 34 if runtime.GOARCH == "wasm" { 35 t.Skip("no preemption on wasm yet") 36 } 37 if testing.Short() { 38 t.Skip("skipping during short test") 39 } 40 maxprocs := runtime.GOMAXPROCS(3) 41 compl := make(chan bool, 2) 42 go func() { 43 for i := 0; i != 1000; i += 1 { 44 runtime.GC() 45 } 46 compl <- true 47 }() 48 go func() { 49 for i := 0; i != 1000; i += 1 { 50 runtime.GOMAXPROCS(3) 51 } 52 compl <- true 53 }() 54 go perpetuumMobile() 55 <-compl 56 <-compl 57 stop <- true 58 runtime.GOMAXPROCS(maxprocs) 59 } 60 61 func TestYieldProgress(t *testing.T) { 62 testYieldProgress(false) 63 } 64 65 func TestYieldLockedProgress(t *testing.T) { 66 testYieldProgress(true) 67 } 68 69 func testYieldProgress(locked bool) { 70 c := make(chan bool) 71 cack := make(chan bool) 72 go func() { 73 if locked { 74 runtime.LockOSThread() 75 } 76 for { 77 select { 78 case <-c: 79 cack <- true 80 return 81 default: 82 runtime.Gosched() 83 } 84 } 85 }() 86 time.Sleep(10 * time.Millisecond) 87 c <- true 88 <-cack 89 } 90 91 func TestYieldLocked(t *testing.T) { 92 const N = 10 93 c := make(chan bool) 94 go func() { 95 runtime.LockOSThread() 96 for i := 0; i < N; i++ { 97 runtime.Gosched() 98 time.Sleep(time.Millisecond) 99 } 100 c <- true 101 // runtime.UnlockOSThread() is deliberately omitted 102 }() 103 <-c 104 } 105 106 func TestGoroutineParallelism(t *testing.T) { 107 if runtime.NumCPU() == 1 { 108 // Takes too long, too easy to deadlock, etc. 109 t.Skip("skipping on uniprocessor") 110 } 111 P := 4 112 N := 10 113 if testing.Short() { 114 P = 3 115 N = 3 116 } 117 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P)) 118 // If runtime triggers a forced GC during this test then it will deadlock, 119 // since the goroutines can't be stopped/preempted. 120 // Disable GC for this test (see issue #10958). 121 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 122 for try := 0; try < N; try++ { 123 done := make(chan bool) 124 x := uint32(0) 125 for p := 0; p < P; p++ { 126 // Test that all P goroutines are scheduled at the same time 127 go func(p int) { 128 for i := 0; i < 3; i++ { 129 expected := uint32(P*i + p) 130 for atomic.LoadUint32(&x) != expected { 131 } 132 atomic.StoreUint32(&x, expected+1) 133 } 134 done <- true 135 }(p) 136 } 137 for p := 0; p < P; p++ { 138 <-done 139 } 140 } 141 } 142 143 // Test that all runnable goroutines are scheduled at the same time. 144 func TestGoroutineParallelism2(t *testing.T) { 145 //testGoroutineParallelism2(t, false, false) 146 testGoroutineParallelism2(t, true, false) 147 testGoroutineParallelism2(t, false, true) 148 testGoroutineParallelism2(t, true, true) 149 } 150 151 func testGoroutineParallelism2(t *testing.T, load, netpoll bool) { 152 if runtime.NumCPU() == 1 { 153 // Takes too long, too easy to deadlock, etc. 154 t.Skip("skipping on uniprocessor") 155 } 156 P := 4 157 N := 10 158 if testing.Short() { 159 N = 3 160 } 161 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P)) 162 // If runtime triggers a forced GC during this test then it will deadlock, 163 // since the goroutines can't be stopped/preempted. 164 // Disable GC for this test (see issue #10958). 165 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 166 for try := 0; try < N; try++ { 167 if load { 168 // Create P goroutines and wait until they all run. 169 // When we run the actual test below, worker threads 170 // running the goroutines will start parking. 171 done := make(chan bool) 172 x := uint32(0) 173 for p := 0; p < P; p++ { 174 go func() { 175 if atomic.AddUint32(&x, 1) == uint32(P) { 176 done <- true 177 return 178 } 179 for atomic.LoadUint32(&x) != uint32(P) { 180 } 181 }() 182 } 183 <-done 184 } 185 if netpoll { 186 // Enable netpoller, affects schedler behavior. 187 laddr := "localhost:0" 188 if runtime.GOOS == "android" { 189 // On some Android devices, there are no records for localhost, 190 // see https://golang.org/issues/14486. 191 // Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems. 192 laddr = "127.0.0.1:0" 193 } 194 ln, err := net.Listen("tcp", laddr) 195 if err != nil { 196 defer ln.Close() // yup, defer in a loop 197 } 198 } 199 done := make(chan bool) 200 x := uint32(0) 201 // Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism. 202 for p := 0; p < P/2; p++ { 203 go func(p int) { 204 for p2 := 0; p2 < 2; p2++ { 205 go func(p2 int) { 206 for i := 0; i < 3; i++ { 207 expected := uint32(P*i + p*2 + p2) 208 for atomic.LoadUint32(&x) != expected { 209 } 210 atomic.StoreUint32(&x, expected+1) 211 } 212 done <- true 213 }(p2) 214 } 215 }(p) 216 } 217 for p := 0; p < P; p++ { 218 <-done 219 } 220 } 221 } 222 223 func TestBlockLocked(t *testing.T) { 224 const N = 10 225 c := make(chan bool) 226 go func() { 227 runtime.LockOSThread() 228 for i := 0; i < N; i++ { 229 c <- true 230 } 231 runtime.UnlockOSThread() 232 }() 233 for i := 0; i < N; i++ { 234 <-c 235 } 236 } 237 238 func TestTimerFairness(t *testing.T) { 239 if runtime.GOARCH == "wasm" { 240 t.Skip("no preemption on wasm yet") 241 } 242 243 done := make(chan bool) 244 c := make(chan bool) 245 for i := 0; i < 2; i++ { 246 go func() { 247 for { 248 select { 249 case c <- true: 250 case <-done: 251 return 252 } 253 } 254 }() 255 } 256 257 timer := time.After(20 * time.Millisecond) 258 for { 259 select { 260 case <-c: 261 case <-timer: 262 close(done) 263 return 264 } 265 } 266 } 267 268 func TestTimerFairness2(t *testing.T) { 269 if runtime.GOARCH == "wasm" { 270 t.Skip("no preemption on wasm yet") 271 } 272 273 done := make(chan bool) 274 c := make(chan bool) 275 for i := 0; i < 2; i++ { 276 go func() { 277 timer := time.After(20 * time.Millisecond) 278 var buf [1]byte 279 for { 280 syscall.Read(0, buf[0:0]) 281 select { 282 case c <- true: 283 case <-c: 284 case <-timer: 285 done <- true 286 return 287 } 288 } 289 }() 290 } 291 <-done 292 <-done 293 } 294 295 // The function is used to test preemption at split stack checks. 296 // Declaring a var avoids inlining at the call site. 297 var preempt = func() int { 298 var a [128]int 299 sum := 0 300 for _, v := range a { 301 sum += v 302 } 303 return sum 304 } 305 306 func TestPreemption(t *testing.T) { 307 if runtime.GOARCH == "wasm" { 308 t.Skip("no preemption on wasm yet") 309 } 310 311 // Test that goroutines are preempted at function calls. 312 N := 5 313 if testing.Short() { 314 N = 2 315 } 316 c := make(chan bool) 317 var x uint32 318 for g := 0; g < 2; g++ { 319 go func(g int) { 320 for i := 0; i < N; i++ { 321 for atomic.LoadUint32(&x) != uint32(g) { 322 preempt() 323 } 324 atomic.StoreUint32(&x, uint32(1-g)) 325 } 326 c <- true 327 }(g) 328 } 329 <-c 330 <-c 331 } 332 333 func TestPreemptionGC(t *testing.T) { 334 if runtime.GOARCH == "wasm" { 335 t.Skip("no preemption on wasm yet") 336 } 337 338 // Test that pending GC preempts running goroutines. 339 P := 5 340 N := 10 341 if testing.Short() { 342 P = 3 343 N = 2 344 } 345 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1)) 346 var stop uint32 347 for i := 0; i < P; i++ { 348 go func() { 349 for atomic.LoadUint32(&stop) == 0 { 350 preempt() 351 } 352 }() 353 } 354 for i := 0; i < N; i++ { 355 runtime.Gosched() 356 runtime.GC() 357 } 358 atomic.StoreUint32(&stop, 1) 359 } 360 361 func TestAsyncPreempt(t *testing.T) { 362 if !runtime.PreemptMSupported { 363 t.Skip("asynchronous preemption not supported on this platform") 364 } 365 output := runTestProg(t, "testprog", "AsyncPreempt") 366 want := "OK\n" 367 if output != want { 368 t.Fatalf("want %s, got %s\n", want, output) 369 } 370 } 371 372 func TestGCFairness(t *testing.T) { 373 output := runTestProg(t, "testprog", "GCFairness") 374 want := "OK\n" 375 if output != want { 376 t.Fatalf("want %s, got %s\n", want, output) 377 } 378 } 379 380 func TestGCFairness2(t *testing.T) { 381 output := runTestProg(t, "testprog", "GCFairness2") 382 want := "OK\n" 383 if output != want { 384 t.Fatalf("want %s, got %s\n", want, output) 385 } 386 } 387 388 func TestNumGoroutine(t *testing.T) { 389 output := runTestProg(t, "testprog", "NumGoroutine") 390 want := "1\n" 391 if output != want { 392 t.Fatalf("want %q, got %q", want, output) 393 } 394 395 buf := make([]byte, 1<<20) 396 397 // Try up to 10 times for a match before giving up. 398 // This is a fundamentally racy check but it's important 399 // to notice if NumGoroutine and Stack are _always_ out of sync. 400 for i := 0; ; i++ { 401 // Give goroutines about to exit a chance to exit. 402 // The NumGoroutine and Stack below need to see 403 // the same state of the world, so anything we can do 404 // to keep it quiet is good. 405 runtime.Gosched() 406 407 n := runtime.NumGoroutine() 408 buf = buf[:runtime.Stack(buf, true)] 409 410 nstk := strings.Count(string(buf), "goroutine ") 411 if n == nstk { 412 break 413 } 414 if i >= 10 { 415 t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf) 416 } 417 } 418 } 419 420 func TestPingPongHog(t *testing.T) { 421 if runtime.GOARCH == "wasm" { 422 t.Skip("no preemption on wasm yet") 423 } 424 if testing.Short() { 425 t.Skip("skipping in -short mode") 426 } 427 if race.Enabled { 428 // The race detector randomizes the scheduler, 429 // which causes this test to fail (#38266). 430 t.Skip("skipping in -race mode") 431 } 432 433 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) 434 done := make(chan bool) 435 hogChan, lightChan := make(chan bool), make(chan bool) 436 hogCount, lightCount := 0, 0 437 438 run := func(limit int, counter *int, wake chan bool) { 439 for { 440 select { 441 case <-done: 442 return 443 444 case <-wake: 445 for i := 0; i < limit; i++ { 446 *counter++ 447 } 448 wake <- true 449 } 450 } 451 } 452 453 // Start two co-scheduled hog goroutines. 454 for i := 0; i < 2; i++ { 455 go run(1e6, &hogCount, hogChan) 456 } 457 458 // Start two co-scheduled light goroutines. 459 for i := 0; i < 2; i++ { 460 go run(1e3, &lightCount, lightChan) 461 } 462 463 // Start goroutine pairs and wait for a few preemption rounds. 464 hogChan <- true 465 lightChan <- true 466 time.Sleep(100 * time.Millisecond) 467 close(done) 468 <-hogChan 469 <-lightChan 470 471 // Check that hogCount and lightCount are within a factor of 472 // 5, which indicates that both pairs of goroutines handed off 473 // the P within a time-slice to their buddy. We can use a 474 // fairly large factor here to make this robust: if the 475 // scheduler isn't working right, the gap should be ~1000X. 476 const factor = 5 477 if hogCount > lightCount*factor || lightCount > hogCount*factor { 478 t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount)) 479 } 480 } 481 482 func BenchmarkPingPongHog(b *testing.B) { 483 if b.N == 0 { 484 return 485 } 486 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) 487 488 // Create a CPU hog 489 stop, done := make(chan bool), make(chan bool) 490 go func() { 491 for { 492 select { 493 case <-stop: 494 done <- true 495 return 496 default: 497 } 498 } 499 }() 500 501 // Ping-pong b.N times 502 ping, pong := make(chan bool), make(chan bool) 503 go func() { 504 for j := 0; j < b.N; j++ { 505 pong <- <-ping 506 } 507 close(stop) 508 done <- true 509 }() 510 go func() { 511 for i := 0; i < b.N; i++ { 512 ping <- <-pong 513 } 514 done <- true 515 }() 516 b.ResetTimer() 517 ping <- true // Start ping-pong 518 <-stop 519 b.StopTimer() 520 <-ping // Let last ponger exit 521 <-done // Make sure goroutines exit 522 <-done 523 <-done 524 } 525 526 var padData [128]uint64 527 528 func stackGrowthRecursive(i int) { 529 var pad [128]uint64 530 pad = padData 531 for j := range pad { 532 if pad[j] != 0 { 533 return 534 } 535 } 536 if i != 0 { 537 stackGrowthRecursive(i - 1) 538 } 539 } 540 541 func TestPreemptSplitBig(t *testing.T) { 542 if testing.Short() { 543 t.Skip("skipping in -short mode") 544 } 545 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) 546 stop := make(chan int) 547 go big(stop) 548 for i := 0; i < 3; i++ { 549 time.Sleep(10 * time.Microsecond) // let big start running 550 runtime.GC() 551 } 552 close(stop) 553 } 554 555 func big(stop chan int) int { 556 n := 0 557 for { 558 // delay so that gc is sure to have asked for a preemption 559 for i := 0; i < 1e9; i++ { 560 n++ 561 } 562 563 // call bigframe, which used to miss the preemption in its prologue. 564 bigframe(stop) 565 566 // check if we've been asked to stop. 567 select { 568 case <-stop: 569 return n 570 } 571 } 572 } 573 574 func bigframe(stop chan int) int { 575 // not splitting the stack will overflow. 576 // small will notice that it needs a stack split and will 577 // catch the overflow. 578 var x [8192]byte 579 return small(stop, &x) 580 } 581 582 func small(stop chan int, x *[8192]byte) int { 583 for i := range x { 584 x[i] = byte(i) 585 } 586 sum := 0 587 for i := range x { 588 sum += int(x[i]) 589 } 590 591 // keep small from being a leaf function, which might 592 // make it not do any stack check at all. 593 nonleaf(stop) 594 595 return sum 596 } 597 598 func nonleaf(stop chan int) bool { 599 // do something that won't be inlined: 600 select { 601 case <-stop: 602 return true 603 default: 604 return false 605 } 606 } 607 608 func TestSchedLocalQueue(t *testing.T) { 609 runtime.RunSchedLocalQueueTest() 610 } 611 612 func TestSchedLocalQueueSteal(t *testing.T) { 613 runtime.RunSchedLocalQueueStealTest() 614 } 615 616 func TestSchedLocalQueueEmpty(t *testing.T) { 617 if runtime.NumCPU() == 1 { 618 // Takes too long and does not trigger the race. 619 t.Skip("skipping on uniprocessor") 620 } 621 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) 622 623 // If runtime triggers a forced GC during this test then it will deadlock, 624 // since the goroutines can't be stopped/preempted during spin wait. 625 defer debug.SetGCPercent(debug.SetGCPercent(-1)) 626 627 iters := int(1e5) 628 if testing.Short() { 629 iters = 1e2 630 } 631 runtime.RunSchedLocalQueueEmptyTest(iters) 632 } 633 634 func benchmarkStackGrowth(b *testing.B, rec int) { 635 b.RunParallel(func(pb *testing.PB) { 636 for pb.Next() { 637 stackGrowthRecursive(rec) 638 } 639 }) 640 } 641 642 func BenchmarkStackGrowth(b *testing.B) { 643 benchmarkStackGrowth(b, 10) 644 } 645 646 func BenchmarkStackGrowthDeep(b *testing.B) { 647 benchmarkStackGrowth(b, 1024) 648 } 649 650 func BenchmarkCreateGoroutines(b *testing.B) { 651 benchmarkCreateGoroutines(b, 1) 652 } 653 654 func BenchmarkCreateGoroutinesParallel(b *testing.B) { 655 benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1)) 656 } 657 658 func benchmarkCreateGoroutines(b *testing.B, procs int) { 659 c := make(chan bool) 660 var f func(n int) 661 f = func(n int) { 662 if n == 0 { 663 c <- true 664 return 665 } 666 go f(n - 1) 667 } 668 for i := 0; i < procs; i++ { 669 go f(b.N / procs) 670 } 671 for i := 0; i < procs; i++ { 672 <-c 673 } 674 } 675 676 func BenchmarkCreateGoroutinesCapture(b *testing.B) { 677 b.ReportAllocs() 678 for i := 0; i < b.N; i++ { 679 const N = 4 680 var wg sync.WaitGroup 681 wg.Add(N) 682 for i := 0; i < N; i++ { 683 i := i 684 go func() { 685 if i >= N { 686 b.Logf("bad") // just to capture b 687 } 688 wg.Done() 689 }() 690 } 691 wg.Wait() 692 } 693 } 694 695 func BenchmarkClosureCall(b *testing.B) { 696 sum := 0 697 off1 := 1 698 for i := 0; i < b.N; i++ { 699 off2 := 2 700 func() { 701 sum += i + off1 + off2 702 }() 703 } 704 _ = sum 705 } 706 707 func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) { 708 if runtime.GOMAXPROCS(0) == 1 { 709 b.Skip("skipping: GOMAXPROCS=1") 710 } 711 712 wakeDelay := 5 * time.Microsecond 713 for _, delay := range []time.Duration{ 714 0, 715 1 * time.Microsecond, 716 2 * time.Microsecond, 717 5 * time.Microsecond, 718 10 * time.Microsecond, 719 20 * time.Microsecond, 720 50 * time.Microsecond, 721 100 * time.Microsecond, 722 } { 723 b.Run(delay.String(), func(b *testing.B) { 724 if b.N == 0 { 725 return 726 } 727 // Start two goroutines, which alternate between being 728 // sender and receiver in the following protocol: 729 // 730 // - The receiver spins for `delay` and then does a 731 // blocking receive on a channel. 732 // 733 // - The sender spins for `delay+wakeDelay` and then 734 // sends to the same channel. (The addition of 735 // `wakeDelay` improves the probability that the 736 // receiver will be blocking when the send occurs when 737 // the goroutines execute in parallel.) 738 // 739 // In each iteration of the benchmark, each goroutine 740 // acts once as sender and once as receiver, so each 741 // goroutine spins for delay twice. 742 // 743 // BenchmarkWakeupParallel is used to estimate how 744 // efficiently the scheduler parallelizes goroutines in 745 // the presence of blocking: 746 // 747 // - If both goroutines are executed on the same core, 748 // an increase in delay by N will increase the time per 749 // iteration by 4*N, because all 4 delays are 750 // serialized. 751 // 752 // - Otherwise, an increase in delay by N will increase 753 // the time per iteration by 2*N, and the time per 754 // iteration is 2 * (runtime overhead + chan 755 // send/receive pair + delay + wakeDelay). This allows 756 // the runtime overhead, including the time it takes 757 // for the unblocked goroutine to be scheduled, to be 758 // estimated. 759 ping, pong := make(chan struct{}), make(chan struct{}) 760 start := make(chan struct{}) 761 done := make(chan struct{}) 762 go func() { 763 <-start 764 for i := 0; i < b.N; i++ { 765 // sender 766 spin(delay + wakeDelay) 767 ping <- struct{}{} 768 // receiver 769 spin(delay) 770 <-pong 771 } 772 done <- struct{}{} 773 }() 774 go func() { 775 for i := 0; i < b.N; i++ { 776 // receiver 777 spin(delay) 778 <-ping 779 // sender 780 spin(delay + wakeDelay) 781 pong <- struct{}{} 782 } 783 done <- struct{}{} 784 }() 785 b.ResetTimer() 786 start <- struct{}{} 787 <-done 788 <-done 789 }) 790 } 791 } 792 793 func BenchmarkWakeupParallelSpinning(b *testing.B) { 794 benchmarkWakeupParallel(b, func(d time.Duration) { 795 end := time.Now().Add(d) 796 for time.Now().Before(end) { 797 // do nothing 798 } 799 }) 800 } 801 802 // sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go) 803 // to sleep for the given duration. If nil, dependent tests are skipped. 804 // The implementation should invoke a blocking system call and not 805 // call time.Sleep, which would deschedule the goroutine. 806 var sysNanosleep func(d time.Duration) 807 808 func BenchmarkWakeupParallelSyscall(b *testing.B) { 809 if sysNanosleep == nil { 810 b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS) 811 } 812 benchmarkWakeupParallel(b, func(d time.Duration) { 813 sysNanosleep(d) 814 }) 815 } 816 817 type Matrix [][]float64 818 819 func BenchmarkMatmult(b *testing.B) { 820 b.StopTimer() 821 // matmult is O(N**3) but testing expects O(b.N), 822 // so we need to take cube root of b.N 823 n := int(math.Cbrt(float64(b.N))) + 1 824 A := makeMatrix(n) 825 B := makeMatrix(n) 826 C := makeMatrix(n) 827 b.StartTimer() 828 matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8) 829 } 830 831 func makeMatrix(n int) Matrix { 832 m := make(Matrix, n) 833 for i := 0; i < n; i++ { 834 m[i] = make([]float64, n) 835 for j := 0; j < n; j++ { 836 m[i][j] = float64(i*n + j) 837 } 838 } 839 return m 840 } 841 842 func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) { 843 di := i1 - i0 844 dj := j1 - j0 845 dk := k1 - k0 846 if di >= dj && di >= dk && di >= threshold { 847 // divide in two by y axis 848 mi := i0 + di/2 849 done1 := make(chan struct{}, 1) 850 go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold) 851 matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold) 852 <-done1 853 } else if dj >= dk && dj >= threshold { 854 // divide in two by x axis 855 mj := j0 + dj/2 856 done1 := make(chan struct{}, 1) 857 go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold) 858 matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold) 859 <-done1 860 } else if dk >= threshold { 861 // divide in two by "k" axis 862 // deliberately not parallel because of data races 863 mk := k0 + dk/2 864 matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold) 865 matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold) 866 } else { 867 // the matrices are small enough, compute directly 868 for i := i0; i < i1; i++ { 869 for j := j0; j < j1; j++ { 870 for k := k0; k < k1; k++ { 871 C[i][j] += A[i][k] * B[k][j] 872 } 873 } 874 } 875 } 876 if done != nil { 877 done <- struct{}{} 878 } 879 } 880 881 func TestStealOrder(t *testing.T) { 882 runtime.RunStealOrderTest() 883 } 884 885 func TestLockOSThreadNesting(t *testing.T) { 886 if runtime.GOARCH == "wasm" { 887 t.Skip("no threads on wasm yet") 888 } 889 890 go func() { 891 e, i := runtime.LockOSCounts() 892 if e != 0 || i != 0 { 893 t.Errorf("want locked counts 0, 0; got %d, %d", e, i) 894 return 895 } 896 runtime.LockOSThread() 897 runtime.LockOSThread() 898 runtime.UnlockOSThread() 899 e, i = runtime.LockOSCounts() 900 if e != 1 || i != 0 { 901 t.Errorf("want locked counts 1, 0; got %d, %d", e, i) 902 return 903 } 904 runtime.UnlockOSThread() 905 e, i = runtime.LockOSCounts() 906 if e != 0 || i != 0 { 907 t.Errorf("want locked counts 0, 0; got %d, %d", e, i) 908 return 909 } 910 }() 911 } 912 913 func TestLockOSThreadExit(t *testing.T) { 914 testLockOSThreadExit(t, "testprog") 915 } 916 917 func testLockOSThreadExit(t *testing.T, prog string) { 918 output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1") 919 want := "OK\n" 920 if output != want { 921 t.Errorf("want %q, got %q", want, output) 922 } 923 924 output = runTestProg(t, prog, "LockOSThreadAlt") 925 if output != want { 926 t.Errorf("want %q, got %q", want, output) 927 } 928 } 929 930 func TestLockOSThreadAvoidsStatePropagation(t *testing.T) { 931 want := "OK\n" 932 skip := "unshare not permitted\n" 933 output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1") 934 if output == skip { 935 t.Skip("unshare syscall not permitted on this system") 936 } else if output != want { 937 t.Errorf("want %q, got %q", want, output) 938 } 939 } 940 941 func TestLockOSThreadTemplateThreadRace(t *testing.T) { 942 testenv.MustHaveGoRun(t) 943 944 exe, err := buildTestProg(t, "testprog") 945 if err != nil { 946 t.Fatal(err) 947 } 948 949 iterations := 100 950 if testing.Short() { 951 // Reduce run time to ~100ms, with much lower probability of 952 // catching issues. 953 iterations = 5 954 } 955 for i := 0; i < iterations; i++ { 956 want := "OK\n" 957 output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace") 958 if output != want { 959 t.Fatalf("run %d: want %q, got %q", i, want, output) 960 } 961 } 962 } 963 964 // fakeSyscall emulates a system call. 965 //go:nosplit 966 func fakeSyscall(duration time.Duration) { 967 runtime.Entersyscall() 968 for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); { 969 } 970 runtime.Exitsyscall() 971 } 972 973 // Check that a goroutine will be preempted if it is calling short system calls. 974 func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) { 975 if runtime.GOARCH == "wasm" { 976 t.Skip("no preemption on wasm yet") 977 } 978 979 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) 980 981 interations := 10 982 if testing.Short() { 983 interations = 1 984 } 985 const ( 986 maxDuration = 3 * time.Second 987 nroutines = 8 988 ) 989 990 for i := 0; i < interations; i++ { 991 c := make(chan bool, nroutines) 992 stop := uint32(0) 993 994 start := time.Now() 995 for g := 0; g < nroutines; g++ { 996 go func(stop *uint32) { 997 c <- true 998 for atomic.LoadUint32(stop) == 0 { 999 fakeSyscall(syscallDuration) 1000 } 1001 c <- true 1002 }(&stop) 1003 } 1004 // wait until all goroutines have started. 1005 for g := 0; g < nroutines; g++ { 1006 <-c 1007 } 1008 atomic.StoreUint32(&stop, 1) 1009 // wait until all goroutines have finished. 1010 for g := 0; g < nroutines; g++ { 1011 <-c 1012 } 1013 duration := time.Since(start) 1014 1015 if duration > maxDuration { 1016 t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration) 1017 } 1018 } 1019 } 1020 1021 func TestPreemptionAfterSyscall(t *testing.T) { 1022 for _, i := range []time.Duration{10, 100, 1000} { 1023 d := i * time.Microsecond 1024 t.Run(fmt.Sprint(d), func(t *testing.T) { 1025 testPreemptionAfterSyscall(t, d) 1026 }) 1027 } 1028 } 1029 1030 func TestGetgThreadSwitch(t *testing.T) { 1031 runtime.RunGetgThreadSwitchTest() 1032 } 1033 1034 // TestNetpollBreak tests that netpollBreak can break a netpoll. 1035 // This test is not particularly safe since the call to netpoll 1036 // will pick up any stray files that are ready, but it should work 1037 // OK as long it is not run in parallel. 1038 func TestNetpollBreak(t *testing.T) { 1039 if runtime.GOMAXPROCS(0) == 1 { 1040 t.Skip("skipping: GOMAXPROCS=1") 1041 } 1042 1043 // Make sure that netpoll is initialized. 1044 runtime.NetpollGenericInit() 1045 1046 start := time.Now() 1047 c := make(chan bool, 2) 1048 go func() { 1049 c <- true 1050 runtime.Netpoll(10 * time.Second.Nanoseconds()) 1051 c <- true 1052 }() 1053 <-c 1054 // Loop because the break might get eaten by the scheduler. 1055 // Break twice to break both the netpoll we started and the 1056 // scheduler netpoll. 1057 loop: 1058 for { 1059 runtime.Usleep(100) 1060 runtime.NetpollBreak() 1061 runtime.NetpollBreak() 1062 select { 1063 case <-c: 1064 break loop 1065 default: 1066 } 1067 } 1068 if dur := time.Since(start); dur > 5*time.Second { 1069 t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur) 1070 } 1071 } 1072 1073 // TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value 1074 // doesn't cause a crash at startup. See issue 38474. 1075 func TestBigGOMAXPROCS(t *testing.T) { 1076 t.Parallel() 1077 output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024") 1078 // Ignore error conditions on small machines. 1079 for _, errstr := range []string{ 1080 "failed to create new OS thread", 1081 "cannot allocate memory", 1082 } { 1083 if strings.Contains(output, errstr) { 1084 t.Skipf("failed to create 1024 threads") 1085 } 1086 } 1087 if !strings.Contains(output, "unknown function: NonexistentTest") { 1088 t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output) 1089 } 1090 }