github.com/comwrg/go/src@v0.0.0-20220319063731-c238d0440370/runtime/gc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types
	// even on the weakest machines. The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit platforms, 4TB on 64-bit platforms.
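		// On 32-bit, unsafe.Sizeof(uintptr(0)) is 4, so the second
		// term below is zero and n is 400MB; on 64-bit it is 8,
		// adding (8-4)<<40 = 4TB.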
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	got := runTestProg(t, "testprog", "GCZombie")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move;
	// old should not have been.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// This was a real failure.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
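		// (The tiny allocator can pack several small noscan objects
		// into one block, so a live neighbor could keep an otherwise
		// dead object reachable; a *int is pointer-bearing and gets
		// its own slot.)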
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if want != got {
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassSink *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	pointerClassSink = &notOnStack
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(&notOnStack), "heap")
	check(unsafe.Pointer(&pointerClassSink), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}
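
// benchSetType reports throughput as bytes of type metadata set per
// operation: the pointee size for a pointer argument, or element size
// times length for a slice. runtime.BenchSetType (exported for tests)
// then performs the metadata writes b.N times.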
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}
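
// TestPageAccounting cross-checks the runtime's cached pages-in-use
// counter against a direct count over the heap's spans. CountPagesInUse
// is exported by the runtime for tests and returns both values.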
func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}
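
// BenchmarkReadMemStatsLatency measures the latency of individual
// ReadMemStats calls (each of which stops the world) while applyGCLoad
// keeps the GC busy, and reports p50/p90/p99 latencies instead of ns/op.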
func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Accumulate the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure the latency of each call.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Now().Sub(start))
	}
	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies, then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
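	// Rotating blocks of this pointer slice with copy should exercise
	// the bulk write barrier: one barrier call covers a whole block of
	// pointer slots rather than one barrier per individual store.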
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}