github.com/cznic/memory@v0.0.0-20181122101858-44f9dcde99e8/all_test.go

// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package memory

import (
	"bytes"
	"fmt"
	"math"
	"os"
	"path"
	"runtime"
	"strings"
	"testing"
	"unsafe"

	"github.com/cznic/mathutil"
)

func caller(s string, va ...interface{}) {
	if s == "" {
		s = strings.Repeat("%v ", len(va))
	}
	_, fn, fl, _ := runtime.Caller(2)
	fmt.Fprintf(os.Stderr, "# caller: %s:%d: ", path.Base(fn), fl)
	fmt.Fprintf(os.Stderr, s, va...)
	fmt.Fprintln(os.Stderr)
	_, fn, fl, _ = runtime.Caller(1)
	fmt.Fprintf(os.Stderr, "# \tcallee: %s:%d: ", path.Base(fn), fl)
	fmt.Fprintln(os.Stderr)
	os.Stderr.Sync()
}

func dbg(s string, va ...interface{}) {
	if s == "" {
		s = strings.Repeat("%v ", len(va))
	}
	_, fn, fl, _ := runtime.Caller(1)
	fmt.Fprintf(os.Stderr, "# dbg %s:%d: ", path.Base(fn), fl)
	fmt.Fprintf(os.Stderr, s, va...)
	fmt.Fprintln(os.Stderr)
	os.Stderr.Sync()
}

func TODO(...interface{}) string { //TODOOK
	_, fn, fl, _ := runtime.Caller(1)
	return fmt.Sprintf("# TODO: %s:%d:\n", path.Base(fn), fl) //TODOOK
}

func use(...interface{}) {}

func init() {
	use(caller, dbg, TODO) //TODOOK
}

// ============================================================================

const quota = 128 << 20

var (
	max    = 2 * osPageSize
	bigMax = 2 * pageSize
)

type block struct {
	p    uintptr
	size int
}

func test1u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a []block
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, block{p, size})
		for i := 0; i < size; i++ {
			*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	srng.Seek(0)
	vrng.Seek(0)
	// Verify
	for i, b := range a {
		if g, e := b.size, srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(i, a, b)
		}

		for j := 0; j < b.size; j++ {
			g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
			}

			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
	}
	// Shuffle
	for i := range a {
		j := srng.Next() % len(a)
		a[i], a[j] = a[j], a[i]
	}
	// Free
	for _, b := range a {
		if err := alloc.UintptrFree(b.p); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test1USmall(t *testing.T) { test1u(t, max) }
func Test1UBig(t *testing.T)   { test1u(t, bigMax) }

func test2u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a []block
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, block{p, size})
		for i := 0; i < size; i++ {
			*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	srng.Seek(0)
	vrng.Seek(0)
	// Verify & free
	for i, b := range a {
		if g, e := b.size, srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(i, a, b)
		}

		for j := 0; j < b.size; j++ {
			g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
			}

			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
		if err := alloc.UintptrFree(b.p); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test2USmall(t *testing.T) { test2u(t, max) }
func Test2UBig(t *testing.T)   { test2u(t, bigMax) }
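// The tests above exercise the uintptr-level API, relying on the invariant
// they assert: the usable size of an allocation is at least the requested
// size. The following is a minimal added sketch of the same
// allocate/write/free cycle (not part of the original suite; it uses only
// calls already present in this file).
func ExampleAllocator_UintptrMalloc() {
	var alloc Allocator
	p, err := alloc.UintptrMalloc(8) // request 8 bytes, get a raw address
	if err != nil {
		panic(err)
	}

	*(*byte)(unsafe.Pointer(p)) = 1 // the memory is writable via unsafe.Pointer
	fmt.Println(UintptrUsableSize(p) >= 8)
	if err := alloc.UintptrFree(p); err != nil {
		panic(err)
	}
	// Output: true
}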
func test3u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	m := map[block][]byte{}
	srng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	for rem > 0 {
		switch srng.Next() % 3 {
		case 0, 1: // 2/3 allocate
			size := srng.Next()
			rem -= size
			p, err := alloc.UintptrMalloc(size)
			if err != nil {
				t.Fatal(err)
			}

			b := make([]byte, size)
			for i := range b {
				b[i] = byte(vrng.Next())
				*(*byte)(unsafe.Pointer(p + uintptr(i))) = b[i]
			}
			m[block{p, size}] = append([]byte(nil), b...)
		default: // 1/3 free
			for b, v := range m {
				for i, v := range v {
					if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
						t.Fatal("corrupted heap")
					}
				}

				if a, b := b.size, UintptrUsableSize(b.p); a > b {
					t.Fatal(a, b)
				}

				for j := 0; j < b.size; j++ {
					*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
				}
				rem += b.size
				alloc.UintptrFree(b.p)
				delete(m, b)
				break
			}
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	for b, v := range m {
		for i, v := range v {
			if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
				t.Fatal("corrupted heap")
			}
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(a, b)
		}

		for j := 0; j < b.size; j++ {
			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
		alloc.UintptrFree(b.p)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test3USmall(t *testing.T) { test3u(t, max) }
func Test3UBig(t *testing.T)   { test3u(t, bigMax) }
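// Note (added): test3u verifies differently from test1u/test2u above.
// Instead of replaying the FC32 sequences with Seek(0) to regenerate the
// expected sizes and byte patterns, it keeps a shadow copy of every live
// allocation in a Go map and compares byte by byte, which lets allocations
// and frees interleave in random order.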
func TestUFree(t *testing.T) {
	var alloc Allocator
	p, err := alloc.UintptrMalloc(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := alloc.UintptrFree(p); err != nil {
		t.Fatal(err)
	}

	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func TestUMalloc(t *testing.T) {
	var alloc Allocator
	p, err := alloc.UintptrMalloc(maxSlotSize)
	if err != nil {
		t.Fatal(err)
	}

	pg := (*page)(unsafe.Pointer(p &^ uintptr(osPageMask)))
	if 1<<pg.log > maxSlotSize {
		t.Fatal(1<<pg.log, maxSlotSize)
	}

	if err := alloc.UintptrFree(p); err != nil {
		t.Fatal(err)
	}

	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func test1(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a [][]byte
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		b, err := alloc.Malloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, b)
		for i := range b {
			b[i] = byte(vrng.Next())
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	srng.Seek(0)
	vrng.Seek(0)
	// Verify
	for i, b := range a {
		if g, e := len(b), srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(i, a, b)
		}

		for i, g := range b {
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
			}

			b[i] = 0
		}
	}
	// Shuffle
	for i := range a {
		j := srng.Next() % len(a)
		a[i], a[j] = a[j], a[i]
	}
	// Free
	for _, b := range a {
		if err := alloc.Free(b); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test1Small(t *testing.T) { test1(t, max) }
func Test1Big(t *testing.T)   { test1(t, bigMax) }
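// A minimal added sketch of the []byte-level API exercised by test1 above:
// Malloc returns a slice whose length equals the requested size (the tests
// in this file assert exactly that), and Free releases it. Not part of the
// original suite.
func ExampleAllocator_Malloc() {
	var alloc Allocator
	b, err := alloc.Malloc(16)
	if err != nil {
		panic(err)
	}

	b[0] = 42 // the slice is ordinary writable memory
	fmt.Println(len(b), b[0])
	if err := alloc.Free(b); err != nil {
		panic(err)
	}
	// Output: 16 42
}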
func test2(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a [][]byte
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		b, err := alloc.Malloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, b)
		for i := range b {
			b[i] = byte(vrng.Next())
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	srng.Seek(0)
	vrng.Seek(0)
	// Verify & free
	for i, b := range a {
		if g, e := len(b), srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(i, a, b)
		}

		for i, g := range b {
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
			}

			b[i] = 0
		}
		if err := alloc.Free(b); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test2Small(t *testing.T) { test2(t, max) }
func Test2Big(t *testing.T)   { test2(t, bigMax) }

func test3(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	m := map[*[]byte][]byte{}
	srng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	for rem > 0 {
		switch srng.Next() % 3 {
		case 0, 1: // 2/3 allocate
			size := srng.Next()
			rem -= size
			b, err := alloc.Malloc(size)
			if err != nil {
				t.Fatal(err)
			}

			for i := range b {
				b[i] = byte(vrng.Next())
			}
			m[&b] = append([]byte(nil), b...)
		default: // 1/3 free
			for k, v := range m {
				b := *k
				if !bytes.Equal(b, v) {
					t.Fatal("corrupted heap")
				}

				if a, b := len(b), UsableSize(&b[0]); a > b {
					t.Fatal(a, b)
				}

				for i := range b {
					b[i] = 0
				}
				rem += len(b)
				alloc.Free(b)
				delete(m, k)
				break
			}
		}
	}
	t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.allocs, alloc.mmaps, alloc.bytes, alloc.bytes-quota, 100*float64(alloc.bytes-quota)/quota)
	for k, v := range m {
		b := *k
		if !bytes.Equal(b, v) {
			t.Fatal("corrupted heap")
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(a, b)
		}

		for i := range b {
			b[i] = 0
		}
		alloc.Free(b)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test3Small(t *testing.T) { test3(t, max) }
func Test3Big(t *testing.T)   { test3(t, bigMax) }
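// Added sketch of the UsableSize invariant asserted throughout this file:
// the usable size reported for an allocation is never less than the
// requested length. Not part of the original suite; it uses only
// Malloc/UsableSize/Free as seen above.
func ExampleUsableSize() {
	var alloc Allocator
	b, err := alloc.Malloc(10)
	if err != nil {
		panic(err)
	}

	fmt.Println(UsableSize(&b[0]) >= 10)
	if err := alloc.Free(b); err != nil {
		panic(err)
	}
	// Output: true
}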
func TestFree(t *testing.T) {
	var alloc Allocator
	b, err := alloc.Malloc(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := alloc.Free(b[:0]); err != nil {
		t.Fatal(err)
	}

	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func TestMalloc(t *testing.T) {
	var alloc Allocator
	b, err := alloc.Malloc(maxSlotSize)
	if err != nil {
		t.Fatal(err)
	}

	p := (*page)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) &^ uintptr(osPageMask)))
	if 1<<p.log > maxSlotSize {
		t.Fatal(1<<p.log, maxSlotSize)
	}

	if err := alloc.Free(b[:0]); err != nil {
		t.Fatal(err)
	}

	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func benchmarkFree(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	for i := range a {
		p, err := alloc.Malloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.ResetTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	b.StopTimer()
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkFree16(b *testing.B) { benchmarkFree(b, 1<<4) }
func BenchmarkFree32(b *testing.B) { benchmarkFree(b, 1<<5) }
func BenchmarkFree64(b *testing.B) { benchmarkFree(b, 1<<6) }

func benchmarkCalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.Calloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkCalloc16(b *testing.B) { benchmarkCalloc(b, 1<<4) }
func BenchmarkCalloc32(b *testing.B) { benchmarkCalloc(b, 1<<5) }
func BenchmarkCalloc64(b *testing.B) { benchmarkCalloc(b, 1<<6) }
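// Added sketch of Calloc, which the benchmarks above only time. Hedged
// assumption: like C's calloc, Calloc is expected to return zeroed memory
// (inferred from the conventional calloc contract; this file itself does
// not assert it).
func ExampleAllocator_Calloc() {
	var alloc Allocator
	b, err := alloc.Calloc(4)
	if err != nil {
		panic(err)
	}

	fmt.Println(b[0] | b[1] | b[2] | b[3]) // all bytes start zeroed
	if err := alloc.Free(b); err != nil {
		panic(err)
	}
	// Output: 0
}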
func benchmarkGoCalloc(b *testing.B, size int) {
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		a[i] = make([]byte, size)
	}
	b.StopTimer()
	use(a)
}

func BenchmarkGoCalloc16(b *testing.B) { benchmarkGoCalloc(b, 1<<4) }
func BenchmarkGoCalloc32(b *testing.B) { benchmarkGoCalloc(b, 1<<5) }
func BenchmarkGoCalloc64(b *testing.B) { benchmarkGoCalloc(b, 1<<6) }

func benchmarkMalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.Malloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkMalloc16(b *testing.B) { benchmarkMalloc(b, 1<<4) }
func BenchmarkMalloc32(b *testing.B) { benchmarkMalloc(b, 1<<5) }
func BenchmarkMalloc64(b *testing.B) { benchmarkMalloc(b, 1<<6) }

func benchmarkUintptrFree(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	for i := range a {
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.ResetTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	b.StopTimer()
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrFree16(b *testing.B) { benchmarkUintptrFree(b, 1<<4) }
func BenchmarkUintptrFree32(b *testing.B) { benchmarkUintptrFree(b, 1<<5) }
func BenchmarkUintptrFree64(b *testing.B) { benchmarkUintptrFree(b, 1<<6) }

func benchmarkUintptrCalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.UintptrCalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrCalloc16(b *testing.B) { benchmarkUintptrCalloc(b, 1<<4) }
func BenchmarkUintptrCalloc32(b *testing.B) { benchmarkUintptrCalloc(b, 1<<5) }
func BenchmarkUintptrCalloc64(b *testing.B) { benchmarkUintptrCalloc(b, 1<<6) }

func benchmarkUintptrMalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	if alloc.allocs != 0 || alloc.mmaps != 0 || alloc.bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrMalloc16(b *testing.B) { benchmarkUintptrMalloc(b, 1<<4) }
func BenchmarkUintptrMalloc32(b *testing.B) { benchmarkUintptrMalloc(b, 1<<5) }
func BenchmarkUintptrMalloc64(b *testing.B) { benchmarkUintptrMalloc(b, 1<<6) }
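// Note (added): the benchmarks above share one methodology. Work that is
// not being measured sits outside the b.ResetTimer()/b.StopTimer() window:
// the Free benchmarks pre-allocate before ResetTimer and time only the
// frees, while the Malloc/Calloc benchmarks time only the allocations and
// free afterwards. Each ends by checking that the allocator's counters
// returned to zero, so a leak fails the benchmark rather than skewing
// later runs.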