github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/gc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types even on the weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32 bits, 4TB on 64 bits.
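		// unsafe.Sizeof(uintptr(0)) is 4 on 32-bit and 8 on 64-bit, so the
		// second term below is either 0 or 4<<40 (4 TB).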
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	got := runTestProg(t, "testprog", "GCZombie")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
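	// CountPagesInUse is a test-only export; per the failure message below it
	// reports the runtime's running mheap_.pagesInUse counter alongside a
	// count taken directly from the spans.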
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Collect the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously for b.N iterations and
	// measure the latency of each call.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Now().Sub(start))
	}
	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
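	// Each obj holds 16 pointers (128 bytes on 64-bit), so the 64 MB target
	// below works out to roughly 512Ki objects.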
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}