// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"internal/goarch"
	"reflect"
	. "runtime"
	"runtime/debug"
	"runtime/internal/atomic"
	"testing"
	"time"
	"unsafe"
)

type smallScalar struct {
	X uintptr
}
type smallPointer struct {
	X *smallPointer
}
type smallPointerMix struct {
	A *smallPointer
	B byte
	C *smallPointer
	D [11]byte
}
type mediumScalarEven [8192]byte
type mediumScalarOdd [3321]byte
type mediumPointerEven [1024]*smallPointer
type mediumPointerOdd [1023]*smallPointer

type largeScalar [UserArenaChunkBytes + 1]byte
type largePointer [UserArenaChunkBytes/unsafe.Sizeof(&smallPointer{}) + 1]*smallPointer

func TestUserArena(t *testing.T) {
	// Set GOMAXPROCS to 2 so we don't run too many of these
	// tests in parallel.
	defer GOMAXPROCS(GOMAXPROCS(2))

	// Start a subtest so that we can clean up after any parallel tests within.
	t.Run("Alloc", func(t *testing.T) {
		ss := &smallScalar{5}
		runSubTestUserArenaNew(t, ss, true)

		sp := &smallPointer{new(smallPointer)}
		runSubTestUserArenaNew(t, sp, true)

		spm := &smallPointerMix{sp, 5, nil, [11]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}}
		runSubTestUserArenaNew(t, spm, true)

		mse := new(mediumScalarEven)
		for i := range mse {
			mse[i] = 121
		}
		runSubTestUserArenaNew(t, mse, true)

		mso := new(mediumScalarOdd)
		for i := range mso {
			mso[i] = 122
		}
		runSubTestUserArenaNew(t, mso, true)

		mpe := new(mediumPointerEven)
		for i := range mpe {
			mpe[i] = sp
		}
		runSubTestUserArenaNew(t, mpe, true)

		mpo := new(mediumPointerOdd)
		for i := range mpo {
			mpo[i] = sp
		}
		runSubTestUserArenaNew(t, mpo, true)

		ls := new(largeScalar)
		for i := range ls {
			ls[i] = 123
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, ls, false)

		lp := new(largePointer)
		for i := range lp {
			lp[i] = sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, lp, false)

		sss := make([]smallScalar, 25)
		for i := range sss {
			sss[i] = smallScalar{12}
		}
		runSubTestUserArenaSlice(t, sss, true)

		mpos := make([]mediumPointerOdd, 5)
		for i := range mpos {
			mpos[i] = *mpo
		}
		runSubTestUserArenaSlice(t, mpos, true)

		sps := make([]smallPointer, UserArenaChunkBytes/unsafe.Sizeof(smallPointer{})+1)
		for i := range sps {
			sps[i] = *sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaSlice(t, sps, false)

		// Test zero-sized types.
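		// (The runtime backs every zero-sized allocation with the single
		// zerobase sentinel address, so the arena is expected to hand back
		// that address rather than consume chunk space; the subtests below
		// check this via the exported ZeroBase hook.)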
112 t.Run("struct{}", func(t *testing.T) { 113 arena := NewUserArena() 114 var x any 115 x = (*struct{})(nil) 116 arena.New(&x) 117 if v := unsafe.Pointer(x.(*struct{})); v != ZeroBase { 118 t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase) 119 } 120 arena.Free() 121 }) 122 t.Run("[]struct{}", func(t *testing.T) { 123 arena := NewUserArena() 124 var sl []struct{} 125 arena.Slice(&sl, 10) 126 if v := unsafe.Pointer(&sl[0]); v != ZeroBase { 127 t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase) 128 } 129 arena.Free() 130 }) 131 t.Run("[]int (cap 0)", func(t *testing.T) { 132 arena := NewUserArena() 133 var sl []int 134 arena.Slice(&sl, 0) 135 if len(sl) != 0 { 136 t.Errorf("expected requested zero-sized slice to still have zero length: got %x, want 0", len(sl)) 137 } 138 arena.Free() 139 }) 140 }) 141 142 // Run a GC cycle to get any arenas off the quarantine list. 143 GC() 144 145 if n := GlobalWaitingArenaChunks(); n != 0 { 146 t.Errorf("expected zero waiting arena chunks, found %d", n) 147 } 148 } 149 150 func runSubTestUserArenaNew[S comparable](t *testing.T, value *S, parallel bool) { 151 t.Run(reflect.TypeOf(value).Elem().Name(), func(t *testing.T) { 152 if parallel { 153 t.Parallel() 154 } 155 156 // Allocate and write data, enough to exhaust the arena. 157 // 158 // This is an underestimate, likely leaving some space in the arena. That's a good thing, 159 // because it gives us coverage of boundary cases. 160 n := int(UserArenaChunkBytes / unsafe.Sizeof(*value)) 161 if n == 0 { 162 n = 1 163 } 164 165 // Create a new arena and do a bunch of operations on it. 166 arena := NewUserArena() 167 168 arenaValues := make([]*S, 0, n) 169 for j := 0; j < n; j++ { 170 var x any 171 x = (*S)(nil) 172 arena.New(&x) 173 s := x.(*S) 174 *s = *value 175 arenaValues = append(arenaValues, s) 176 } 177 // Check integrity of allocated data. 178 for _, s := range arenaValues { 179 if *s != *value { 180 t.Errorf("failed integrity check: got %#v, want %#v", *s, *value) 181 } 182 } 183 184 // Release the arena. 185 arena.Free() 186 }) 187 } 188 189 func runSubTestUserArenaSlice[S comparable](t *testing.T, value []S, parallel bool) { 190 t.Run("[]"+reflect.TypeOf(value).Elem().Name(), func(t *testing.T) { 191 if parallel { 192 t.Parallel() 193 } 194 195 // Allocate and write data, enough to exhaust the arena. 196 // 197 // This is an underestimate, likely leaving some space in the arena. That's a good thing, 198 // because it gives us coverage of boundary cases. 199 n := int(UserArenaChunkBytes / (unsafe.Sizeof(*new(S)) * uintptr(cap(value)))) 200 if n == 0 { 201 n = 1 202 } 203 204 // Create a new arena and do a bunch of operations on it. 205 arena := NewUserArena() 206 207 arenaValues := make([][]S, 0, n) 208 for j := 0; j < n; j++ { 209 var sl []S 210 arena.Slice(&sl, cap(value)) 211 copy(sl, value) 212 arenaValues = append(arenaValues, sl) 213 } 214 // Check integrity of allocated data. 215 for _, sl := range arenaValues { 216 for i := range sl { 217 got := sl[i] 218 want := value[i] 219 if got != want { 220 t.Errorf("failed integrity check: got %#v, want %#v at index %d", got, want, i) 221 } 222 } 223 } 224 225 // Release the arena. 
		arena.Free()
	})
}

func TestUserArenaLiveness(t *testing.T) {
	t.Run("Free", func(t *testing.T) {
		testUserArenaLiveness(t, false)
	})
	t.Run("Finalizer", func(t *testing.T) {
		testUserArenaLiveness(t, true)
	})
}

func testUserArenaLiveness(t *testing.T, useArenaFinalizer bool) {
	// Disable the GC so that there's zero chance we try doing anything arena-related *during*
	// a mark phase, since otherwise a bunch of arenas could end up on the fault list.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	// Defensively ensure that any full arena chunks leftover from previous tests have been cleared.
	GC()
	GC()

	arena := NewUserArena()

	// Allocate a few pointer-ful but uninitialized objects so that later we can
	// place a reference to a heap object at a more interesting location.
	for i := 0; i < 3; i++ {
		var x any
		x = (*mediumPointerOdd)(nil)
		arena.New(&x)
	}

	var x any
	x = (*smallPointerMix)(nil)
	arena.New(&x)
	v := x.(*smallPointerMix)

	var safeToFinalize atomic.Bool
	var finalized atomic.Bool
	v.C = new(smallPointer)
	SetFinalizer(v.C, func(_ *smallPointer) {
		if !safeToFinalize.Load() {
			t.Error("finalized arena-referenced object unexpectedly")
		}
		finalized.Store(true)
	})

	// Make sure it stays alive.
	GC()
	GC()

	// In order to ensure the object can be freed, we now need to use
	// the entire arena. Exhaust the rest of it.

	for i := 0; i < int(UserArenaChunkBytes/unsafe.Sizeof(mediumScalarEven{})); i++ {
		var x any
		x = (*mediumScalarEven)(nil)
		arena.New(&x)
	}

	// Make sure it stays alive again.
	GC()
	GC()

	v = nil

	safeToFinalize.Store(true)
	if useArenaFinalizer {
		arena = nil

		// Try to queue the arena finalizer.
		GC()
		GC()

		// In order for the finalizer we actually want to run to execute,
		// we need to make sure this one runs first.
		if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
			t.Fatal("finalizer queue was never emptied")
		}
	} else {
		// Free the arena explicitly.
		arena.Free()
	}

	// Try to queue the object's finalizer that we set earlier.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Error("expected arena-referenced object to be finalized")
	}
}

func TestUserArenaClearsPointerBits(t *testing.T) {
	// This is a regression test for a serious issue wherein if pointer bits
	// aren't properly cleared, it's possible to allocate scalar data down
	// into a previously pointer-ful area, causing misinterpretation by the GC.

	// Create a large object, grab a pointer into it, and free it.
	x := new([8 << 20]byte)
	xp := uintptr(unsafe.Pointer(&x[124]))
	var finalized atomic.Bool
	SetFinalizer(x, func(_ *[8 << 20]byte) {
		finalized.Store(true)
	})

	// Write three chunks' worth of pointer data. Three gives us a
	// high likelihood that when we write two later, we'll get the behavior
	// we want.
	a := NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*3); i++ {
		var x any
		x = (*smallPointer)(nil)
		a.New(&x)
	}
	a.Free()

	// Recycle the arena chunks.
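	// (Reuse of the same chunks is what gives this test its teeth: if the
	// pointer bitmaps were not cleared on free, the scalar values written
	// below would be misinterpreted as pointers by the GC.)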
	GC()
	GC()

	a = NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*2); i++ {
		var x any
		x = (*smallScalar)(nil)
		a.New(&x)
		v := x.(*smallScalar)
		// Write a pointer that should not keep x alive.
		*v = smallScalar{xp}
	}
	KeepAlive(x)
	x = nil

	// Try to free x.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Fatal("heap allocation kept alive through non-pointer reference")
	}

	// Clean up the arena.
	a.Free()
	GC()
	GC()
}

func TestUserArenaCloneString(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "abcdefghij"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Create a string using the same memory as the byte slice, hence in
	// the arena. This could be an arena API, but hasn't really been needed
	// yet.
	var as string
	asHeader := (*reflect.StringHeader)(unsafe.Pointer(&as))
	asHeader.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data
	asHeader.Len = len(b)

	// Clone should make a copy of as, since it is in the arena.
	asCopy := UserArenaClone(as)
	if (*reflect.StringHeader)(unsafe.Pointer(&as)).Data == (*reflect.StringHeader)(unsafe.Pointer(&asCopy)).Data {
		t.Error("Clone did not make a copy")
	}

	// Clone should make a copy of subAs, since subAs is just part of as and so is in the arena.
	subAs := as[1:3]
	subAsCopy := UserArenaClone(subAs)
	if (*reflect.StringHeader)(unsafe.Pointer(&subAs)).Data == (*reflect.StringHeader)(unsafe.Pointer(&subAsCopy)).Data {
		t.Error("Clone did not make a copy")
	}
	if len(subAs) != len(subAsCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(subAs), len(subAsCopy))
	} else {
		for i := range subAs {
			if subAs[i] != subAsCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, subAs[i], subAsCopy[i])
			}
		}
	}

	// Clone should not make a copy of doubleAs, since doubleAs will be on the heap.
	doubleAs := as + as
	doubleAsCopy := UserArenaClone(doubleAs)
	if (*reflect.StringHeader)(unsafe.Pointer(&doubleAs)).Data != (*reflect.StringHeader)(unsafe.Pointer(&doubleAsCopy)).Data {
		t.Error("Clone should not have made a copy")
	}

	// Clone should not make a copy of s, since s is a static string.
	sCopy := UserArenaClone(s)
	if (*reflect.StringHeader)(unsafe.Pointer(&s)).Data != (*reflect.StringHeader)(unsafe.Pointer(&sCopy)).Data {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

func TestUserArenaClonePointer(t *testing.T) {
	a := NewUserArena()

	// Clone should not make a copy of a heap-allocated smallScalar.
	x := Escape(new(smallScalar))
	xCopy := UserArenaClone(x)
	if unsafe.Pointer(x) != unsafe.Pointer(xCopy) {
		t.Errorf("Clone should not have made a copy: %#v -> %#v", x, xCopy)
	}

	// Clone should make a copy of an arena-allocated smallScalar.
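	// (The allocation idiom below stores a typed nil pointer in an
	// interface and passes its address to New; the arena then swaps in a
	// pointer to freshly allocated arena memory of that type.)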
	var i any
	i = (*smallScalar)(nil)
	a.New(&i)
	xArena := i.(*smallScalar)
	xArenaCopy := UserArenaClone(xArena)
	if unsafe.Pointer(xArena) == unsafe.Pointer(xArenaCopy) {
		t.Errorf("Clone should have made a copy: %#v -> %#v", xArena, xArenaCopy)
	}
	if *xArena != *xArenaCopy {
		t.Errorf("Clone made an incorrect copy: %#v -> %#v", *xArena, *xArenaCopy)
	}

	a.Free()
}

func TestUserArenaCloneSlice(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "klmnopqrstuv"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Clone should make a copy of b, since it is in the arena.
	bCopy := UserArenaClone(b)
	if unsafe.Pointer(&b[0]) == unsafe.Pointer(&bCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", b, bCopy)
	}
	if len(b) != len(bCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(b), len(bCopy))
	} else {
		for i := range b {
			if b[i] != bCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, b[i], bCopy[i])
			}
		}
	}

	// Clone should make a copy of bSub, since bSub is just part of b and so is in the arena.
	bSub := b[1:3]
	bSubCopy := UserArenaClone(bSub)
	if unsafe.Pointer(&bSub[0]) == unsafe.Pointer(&bSubCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", bSub, bSubCopy)
	}
	if len(bSub) != len(bSubCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(bSub), len(bSubCopy))
	} else {
		for i := range bSub {
			if bSub[i] != bSubCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, bSub[i], bSubCopy[i])
			}
		}
	}

	// Clone should not make a copy of bNotArena, since it will not be in an arena.
	bNotArena := make([]byte, len(s))
	copy(bNotArena, s)
	bNotArenaCopy := UserArenaClone(bNotArena)
	if unsafe.Pointer(&bNotArena[0]) != unsafe.Pointer(&bNotArenaCopy[0]) {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

func TestUserArenaClonePanic(t *testing.T) {
	var s string
	func() {
		x := smallScalar{2}
		defer func() {
			if v := recover(); v != nil {
				s = v.(string)
			}
		}()
		UserArenaClone(x)
	}()
	if s == "" {
		t.Errorf("expected panic from Clone")
	}
}
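
// For reference, the exported hooks exercised above (NewUserArena,
// UserArena.New, UserArena.Slice, UserArenaClone, and UserArena.Free) mirror
// the runtime entry points behind the user-facing experimental arena package.
// A minimal sketch of that public API, assuming Go 1.20+ built with
// GOEXPERIMENT=arenas; kept in a comment so this file's build does not
// depend on the experiment:
//
//	a := arena.NewArena()
//	p := arena.New[smallScalar](a)         // typed allocation, cf. UserArena.New
//	sl := arena.MakeSlice[byte](a, 10, 10) // slice allocation, cf. UserArena.Slice
//	cp := arena.Clone(sl)                  // heap copy of arena memory, cf. UserArenaClone
//	a.Free()                               // release every chunk at once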