modernc.org/memory@v1.6.0/all_test.go

// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package memory // import "modernc.org/memory"

import (
	"bytes"
	"fmt"
	"math"
	"os"
	"path"
	"runtime"
	"strings"
	"testing"
	"unsafe"

	"modernc.org/mathutil"
)

func caller(s string, va ...interface{}) {
	if s == "" {
		s = strings.Repeat("%v ", len(va))
	}
	_, fn, fl, _ := runtime.Caller(2)
	fmt.Fprintf(os.Stderr, "# caller: %s:%d: ", path.Base(fn), fl)
	fmt.Fprintf(os.Stderr, s, va...)
	fmt.Fprintln(os.Stderr)
	_, fn, fl, _ = runtime.Caller(1)
	fmt.Fprintf(os.Stderr, "# \tcallee: %s:%d: ", path.Base(fn), fl)
	fmt.Fprintln(os.Stderr)
	os.Stderr.Sync()
}

func dbg(s string, va ...interface{}) {
	if s == "" {
		s = strings.Repeat("%v ", len(va))
	}
	_, fn, fl, _ := runtime.Caller(1)
	fmt.Fprintf(os.Stderr, "# dbg %s:%d: ", path.Base(fn), fl)
	fmt.Fprintf(os.Stderr, s, va...)
	fmt.Fprintln(os.Stderr)
	os.Stderr.Sync()
}

func TODO(...interface{}) string { //TODOOK
	_, fn, fl, _ := runtime.Caller(1)
	return fmt.Sprintf("# TODO: %s:%d:\n", path.Base(fn), fl) //TODOOK
}

func use(...interface{}) {}

func init() {
	use(caller, dbg, TODO) //TODOOK
}

// ============================================================================

const quota = 128 << 20

var (
	max    = 2 * osPageSize
	bigMax = 2 * pageSize
)

type block struct {
	p    uintptr
	size int
}

func test1u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a []block
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, block{p, size})
		for i := 0; i < size; i++ {
			*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	srng.Seek(0)
	vrng.Seek(0)
	// Verify
	for i, b := range a {
		if g, e := b.size, srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(i, a, b)
		}

		for j := 0; j < b.size; j++ {
			g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
			}

			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
	}
	// Shuffle
	for i := range a {
		j := srng.Next() % len(a)
		a[i], a[j] = a[j], a[i]
	}
	// Free
	for _, b := range a {
		if err := alloc.UintptrFree(b.p); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test1USmall(t *testing.T) { test1u(t, max) }
func Test1UBig(t *testing.T)   { test1u(t, bigMax) }
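
// test2u is the single-pass variant of test1u: instead of verifying every
// block and then freeing them all in shuffled order, it verifies and frees
// each block within the same loop iteration.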
func test2u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a []block
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, block{p, size})
		for i := 0; i < size; i++ {
			*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	srng.Seek(0)
	vrng.Seek(0)
	// Verify & free
	for i, b := range a {
		if g, e := b.size, srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(i, a, b)
		}

		for j := 0; j < b.size; j++ {
			g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
			}

			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
		if err := alloc.UintptrFree(b.p); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test2USmall(t *testing.T) { test2u(t, max) }
func Test2UBig(t *testing.T)   { test2u(t, bigMax) }
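
// test3u randomly interleaves allocation and deallocation: each step
// allocates a block with probability 2/3 or frees an arbitrary live one with
// probability 1/3, keeping a Go-managed shadow copy of every block's
// contents so the heap can be checked for corruption.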
func test3u(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	m := map[block][]byte{}
	srng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	for rem > 0 {
		switch srng.Next() % 3 {
		case 0, 1: // 2/3 allocate
			size := srng.Next()
			rem -= size
			p, err := alloc.UintptrMalloc(size)
			if err != nil {
				t.Fatal(err)
			}

			b := make([]byte, size)
			for i := range b {
				b[i] = byte(vrng.Next())
				*(*byte)(unsafe.Pointer(p + uintptr(i))) = b[i]
			}
			m[block{p, size}] = append([]byte(nil), b...)
		default: // 1/3 free
			for b, v := range m {
				for i, v := range v {
					if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
						t.Fatal("corrupted heap")
					}
				}

				if a, b := b.size, UintptrUsableSize(b.p); a > b {
					t.Fatal(a, b)
				}

				for j := 0; j < b.size; j++ {
					*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
				}
				rem += b.size
				alloc.UintptrFree(b.p)
				delete(m, b)
				break
			}
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	for b, v := range m {
		for i, v := range v {
			if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
				t.Fatal("corrupted heap")
			}
		}

		if a, b := b.size, UintptrUsableSize(b.p); a > b {
			t.Fatal(a, b)
		}

		for j := 0; j < b.size; j++ {
			*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
		}
		alloc.UintptrFree(b.p)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test3USmall(t *testing.T) { test3u(t, max) }
func Test3UBig(t *testing.T)   { test3u(t, bigMax) }

func TestUFree(t *testing.T) {
	var alloc Allocator
	p, err := alloc.UintptrMalloc(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := alloc.UintptrFree(p); err != nil {
		t.Fatal(err)
	}

	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func TestUMalloc(t *testing.T) {
	var alloc Allocator
	p, err := alloc.UintptrMalloc(maxSlotSize)
	if err != nil {
		t.Fatal(err)
	}

	pg := (*page)(unsafe.Pointer(p &^ uintptr(osPageMask)))
	if 1<<pg.log > maxSlotSize {
		t.Fatal(1<<pg.log, maxSlotSize)
	}

	if err := alloc.UintptrFree(p); err != nil {
		t.Fatal(err)
	}

	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}
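
// test1, test2 and test3 repeat the scenarios of test1u, test2u and test3u
// using the []byte-based Malloc/Free API instead of the uintptr-based one.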
func test1(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a [][]byte
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		b, err := alloc.Malloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, b)
		for i := range b {
			b[i] = byte(vrng.Next())
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	srng.Seek(0)
	vrng.Seek(0)
	// Verify
	for i, b := range a {
		if g, e := len(b), srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(i, a, b)
		}

		for i, g := range b {
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
			}

			b[i] = 0
		}
	}
	// Shuffle
	for i := range a {
		j := srng.Next() % len(a)
		a[i], a[j] = a[j], a[i]
	}
	// Free
	for _, b := range a {
		if err := alloc.Free(b); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test1Small(t *testing.T) { test1(t, max) }
func Test1Big(t *testing.T)   { test1(t, bigMax) }

func test2(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	var a [][]byte
	srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate
	for rem > 0 {
		size := srng.Next()%max + 1
		rem -= size
		b, err := alloc.Malloc(size)
		if err != nil {
			t.Fatal(err)
		}

		a = append(a, b)
		for i := range b {
			b[i] = byte(vrng.Next())
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	srng.Seek(0)
	vrng.Seek(0)
	// Verify & free
	for i, b := range a {
		if g, e := len(b), srng.Next()%max+1; g != e {
			t.Fatal(i, g, e)
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(i, a, b)
		}

		for i, g := range b {
			if e := byte(vrng.Next()); g != e {
				t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
			}

			b[i] = 0
		}
		if err := alloc.Free(b); err != nil {
			t.Fatal(err)
		}
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test2Small(t *testing.T) { test2(t, max) }
func Test2Big(t *testing.T)   { test2(t, bigMax) }
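
// test3 is the []byte counterpart of test3u: a randomized 2/3 allocate, 1/3
// free mix with a shadow copy of every live block kept in a map keyed by a
// pointer to the slice header.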
func test3(t *testing.T, max int) {
	var alloc Allocator
	rem := quota
	m := map[*[]byte][]byte{}
	srng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	vrng, err := mathutil.NewFC32(1, max, true)
	if err != nil {
		t.Fatal(err)
	}

	for rem > 0 {
		switch srng.Next() % 3 {
		case 0, 1: // 2/3 allocate
			size := srng.Next()
			rem -= size
			b, err := alloc.Malloc(size)
			if err != nil {
				t.Fatal(err)
			}

			for i := range b {
				b[i] = byte(vrng.Next())
			}
			m[&b] = append([]byte(nil), b...)
		default: // 1/3 free
			for k, v := range m {
				b := *k
				if !bytes.Equal(b, v) {
					t.Fatal("corrupted heap")
				}

				if a, b := len(b), UsableSize(&b[0]); a > b {
					t.Fatal(a, b)
				}

				for i := range b {
					b[i] = 0
				}
				rem += len(b)
				alloc.Free(b)
				delete(m, k)
				break
			}
		}
	}
	if counters {
		t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
	}
	for k, v := range m {
		b := *k
		if !bytes.Equal(b, v) {
			t.Fatal("corrupted heap")
		}

		if a, b := len(b), UsableSize(&b[0]); a > b {
			t.Fatal(a, b)
		}

		for i := range b {
			b[i] = 0
		}
		alloc.Free(b)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func Test3Small(t *testing.T) { test3(t, max) }
func Test3Big(t *testing.T)   { test3(t, bigMax) }

func TestFree(t *testing.T) {
	var alloc Allocator
	b, err := alloc.Malloc(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := alloc.Free(b[:0]); err != nil {
		t.Fatal(err)
	}

	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}

func TestMalloc(t *testing.T) {
	var alloc Allocator
	b, err := alloc.Malloc(maxSlotSize)
	if err != nil {
		t.Fatal(err)
	}

	p := (*page)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) &^ uintptr(osPageMask)))
	if 1<<p.log > maxSlotSize {
		t.Fatal(1<<p.log, maxSlotSize)
	}

	if err := alloc.Free(b[:0]); err != nil {
		t.Fatal(err)
	}

	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		t.Fatalf("%+v", alloc)
	}
}
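
// ExampleAllocator_Malloc is a minimal usage sketch added for illustration;
// it is not part of the original v1.6.0 test suite. It shows the pairing the
// tests above exercise: Malloc returns a slice of exactly the requested
// length (its usable size may be larger), and Free releases the block
// through the same Allocator.
func ExampleAllocator_Malloc() {
	var alloc Allocator
	b, err := alloc.Malloc(64)
	if err != nil {
		panic(err)
	}

	for i := range b {
		b[i] = 0xA5 // every requested byte is writable
	}
	if err := alloc.Free(b); err != nil {
		panic(err)
	}

	fmt.Println(len(b))
	// Output: 64
}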
func benchmarkFree(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	for i := range a {
		p, err := alloc.Malloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.ResetTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	b.StopTimer()
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkFree16(b *testing.B) { benchmarkFree(b, 1<<4) }
func BenchmarkFree32(b *testing.B) { benchmarkFree(b, 1<<5) }
func BenchmarkFree64(b *testing.B) { benchmarkFree(b, 1<<6) }

func benchmarkCalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.Calloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkCalloc16(b *testing.B) { benchmarkCalloc(b, 1<<4) }
func BenchmarkCalloc32(b *testing.B) { benchmarkCalloc(b, 1<<5) }
func BenchmarkCalloc64(b *testing.B) { benchmarkCalloc(b, 1<<6) }

func benchmarkGoCalloc(b *testing.B, size int) {
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		a[i] = make([]byte, size)
	}
	b.StopTimer()
	use(a)
}

func BenchmarkGoCalloc16(b *testing.B) { benchmarkGoCalloc(b, 1<<4) }
func BenchmarkGoCalloc32(b *testing.B) { benchmarkGoCalloc(b, 1<<5) }
func BenchmarkGoCalloc64(b *testing.B) { benchmarkGoCalloc(b, 1<<6) }

func benchmarkMalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([][]byte, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.Malloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, b := range a {
		alloc.Free(b)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkMalloc16(b *testing.B) { benchmarkMalloc(b, 1<<4) }
func BenchmarkMalloc32(b *testing.B) { benchmarkMalloc(b, 1<<5) }
func BenchmarkMalloc64(b *testing.B) { benchmarkMalloc(b, 1<<6) }

func benchmarkUintptrFree(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	for i := range a {
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.ResetTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	b.StopTimer()
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrFree16(b *testing.B) { benchmarkUintptrFree(b, 1<<4) }
func BenchmarkUintptrFree32(b *testing.B) { benchmarkUintptrFree(b, 1<<5) }
func BenchmarkUintptrFree64(b *testing.B) { benchmarkUintptrFree(b, 1<<6) }

func benchmarkUintptrCalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.UintptrCalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrCalloc16(b *testing.B) { benchmarkUintptrCalloc(b, 1<<4) }
func BenchmarkUintptrCalloc32(b *testing.B) { benchmarkUintptrCalloc(b, 1<<5) }
func BenchmarkUintptrCalloc64(b *testing.B) { benchmarkUintptrCalloc(b, 1<<6) }

func benchmarkUintptrMalloc(b *testing.B, size int) {
	var alloc Allocator
	a := make([]uintptr, b.N)
	b.ResetTimer()
	for i := range a {
		p, err := alloc.UintptrMalloc(size)
		if err != nil {
			b.Fatal(err)
		}

		a[i] = p
	}
	b.StopTimer()
	for _, p := range a {
		alloc.UintptrFree(p)
	}
	if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
		b.Fatalf("%+v", alloc)
	}
}

func BenchmarkUintptrMalloc16(b *testing.B) { benchmarkUintptrMalloc(b, 1<<4) }
func BenchmarkUintptrMalloc32(b *testing.B) { benchmarkUintptrMalloc(b, 1<<5) }
func BenchmarkUintptrMalloc64(b *testing.B) { benchmarkUintptrMalloc(b, 1<<6) }