github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/malloc_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
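		// (PauseNs and PauseEnd are 256-entry circular buffers, so until
		// NumGC reaches 256 no recorded pause has been overwritten.)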
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

type obj12 struct {
	a uint64
	b uint32
}

func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

	// Disable preemption so we stay on one P's tiny allocator and
	// nothing else allocates from it.
	runtime.Acquirem()

	// Make 1-byte allocations until we get a fresh tiny slot.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	runtime.Escape(new(uint32))

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets placed there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12 := runtime.Escape(new(obj12))

	// Try to atomically access tinyObj12.a.
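	// On 32-bit platforms, 64-bit atomic operations require an
	// 8-byte-aligned address, so this store would fault if the
	// allocator placed obj12 at a misaligned offset in the tiny block.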
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}

func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t| got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

var mallocSink uintptr

func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		x ^= uintptr(unsafe.Pointer(&p[0]))
	}
	mallocSink = x
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

// benchHelper starts n goroutines, each blocked in the supplied read loop on
// its own buffered channel, and then times only the GC calls, so the
// benchmark measures how the extra goroutines affect GC cost.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}
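
// Usage note: the allocation and goroutine benchmarks above can be run on
// their own with, for example,
//
//	go test -run=NONE -bench=Malloc -benchmem runtime
//
// where -benchmem reports allocs/op alongside the usual ns/op figures.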