// github.com/moontrade/nogc@v0.1.7/alloc/rpmalloc/rpmalloc_test.go

package rpmalloc

import (
	"fmt"
	"math/rand"
	"runtime"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/moontrade/nogc/alloc/tlsf"
)

func TestCall(t *testing.T) {

}

func TestAlloc(t *testing.T) {

	Malloc(128)
	var wg = &sync.WaitGroup{}

	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			runtime.LockOSThread()
			defer wg.Done()

			Free(Malloc(128))
			a := StdMalloc(24)
			StdFree(a)

			StdFree(Malloc(64))
		}()
	}

	wg.Wait()
	a := StdMalloc(24)
	StdFree(a)

	//time.Sleep(time.Hour)
	//directPtr := AllocDirect(32)
	//FreeDirect(directPtr)
	//HookDirect()
	//Hook()
	//InitThread()
	//a = Malloc(24)
	//println("usable size for", 24, uint(UsableSize(a)))
	//Free(a)
	//
	//a, c := MallocCap(24)
	//println("size", 24, "cap", c)
	//Free(a)
	//
	//a = Malloc(32)
	//println("usable size for", 32, uint(UsableSize(a)))
	//Free(a)

	//for i := 0; i < 100; i++ {
	//	go func() {
	//		InitThread()
	//		Free(Malloc(32))
	//	}()
	//}
}

func BenchmarkCGO(b *testing.B) {
	//b.Run("cgo", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		Stub()
	//	}
	//})
	//
	//b.Run("direct", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		StubDirect()
	//	}
	//})
	//
	//b.Run("alloc/free direct", func(b *testing.B) {
	//	b.ResetTimer()
	//	b.ReportAllocs()
	//	for i := 0; i < b.N; i++ {
	//		FreeDirect(AllocDirect(32))
	//	}
	//})

	b.Run("malloc/free cgo", func(b *testing.B) {
		InitThread()
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			Free(Malloc(32))
		}
	})

	b.Run("malloc_cap/free cgo", func(b *testing.B) {
		InitThread()
		var (
			ptr, c uintptr
		)
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			ptr, c = MallocCap(8)
			_ = c
			Free(ptr)
		}
	})

	b.Run("calloc/free cgo", func(b *testing.B) {
		InitThread()
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			Free(Calloc(1, 32))
		}
	})

	b.Run("tlsf alloc/free tlsf", func(b *testing.B) {
		a := tlsf.NewHeap(1)
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			a.Free(a.Alloc(32))
		}
	})

	//b.Run("alloc direct 32", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		AllocDirect32()
	//	}
	//})
}

func Test_AllocatorThrash(t *testing.T) {
	statsBefore := runtime.MemStats{}
	runtime.ReadMemStats(&statsBefore)
	thrashAllocator(false,
		1000000, 100, 15000, 21000,
		randomSize(0.95, 16, 48),
		randomSize(0.95, 48, 192),
		randomSize(0.55, 64, 512),
		//randomSize(0.70, 128, 512),
		//randomSize(0.15, 128, 512),
		//randomSize(0.30, 128, 1024),
	)

	var stats ThreadStats
	ReadThreadStats(&stats)

	var globalStats GlobalStats
	ReadGlobalStats(&globalStats)

	var statsAfter runtime.MemStats
	runtime.ReadMemStats(&statsAfter)
	//fmt.Println("SysAllocator Size", a.Size())

	fmt.Println("GCStats Before", statsBefore)
	fmt.Println("GCStats After", statsAfter)

	//thrashAllocator(newAllocator(2), 100000, 100, 12000, 17000,
	//	randomSize(0.80, 24, 96),
	//	//randomSize(0.70, 128, 512),
	//	//randomSize(0.15, 128, 512),
	//	//randomSize(0.30, 128, 1024),
	//)
}
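// Illustrative sketch, not part of the original file: exercises the raw
// Malloc/Free pair by writing through the returned pointer, to show the
// memory is directly addressable from Go. It assumes only Malloc and Free as
// already used above (Malloc returning a uintptr) plus the unsafe import.
func TestMallocWrite(t *testing.T) {
	p := Malloc(64)
	if p == 0 {
		t.Fatal("Malloc(64) returned a nil pointer")
	}
	buf := (*[64]byte)(unsafe.Pointer(p))
	for i := range buf {
		buf[i] = byte(i)
	}
	if buf[0] != 0 || buf[63] != 63 {
		t.Fatal("unexpected contents after writing through Malloc'd memory")
	}
	Free(p)
}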
type sizeClass struct {
	pct      float64
	min, max int
	next     func() int
}

func randomSize(pct float64, min, max int) *sizeClass {
	sz := &sizeClass{pct, min, max, nil}
	sz.next = sz.nextRandom
	return sz
}

func (s *sizeClass) nextRandom() int {
	if s.max == s.min {
		return s.max
	}
	return rand.Intn(s.max-s.min) + s.min
}

func thrashAllocator(
	shuffle bool,
	iterations, allocsPerIteration, minAllocs, maxAllocs int,
	sizeClasses ...*sizeClass,
) {
	type allocation struct {
		ptr  uintptr
		size int
	}

	sz := make([]int, 0, allocsPerIteration)
	for _, sc := range sizeClasses {
		for i := 0; i < int(float64(allocsPerIteration)*sc.pct); i++ {
			sz = append(sz, sc.next())
		}
	}

	allocs := make([]allocation, 0, maxAllocs)
	allocSize := 0
	totalAllocs := 0
	totalFrees := 0
	maxAllocCount := 0
	maxAllocSize := 0

	if shuffle {
		rand.Seed(time.Now().UnixNano())
	}

	start := time.Now()
	for i := 0; i < iterations; i++ {
		if shuffle {
			rand.Shuffle(len(sz), func(i, j int) { sz[i], sz[j] = sz[j], sz[i] })
		}

		for _, size := range sz {
			allocs = append(allocs, allocation{
				ptr:  Malloc(uintptr(size)), //tlsfalloc(uintptr(size)),
				size: size,
			})
			allocSize += size
		}
		totalAllocs += len(sz)

		if maxAllocCount < len(allocs) {
			maxAllocCount = len(allocs)
		}
		if allocSize > maxAllocSize {
			maxAllocSize = allocSize
		}

		// Keep allocating until the live set reaches maxAllocs, then free the
		// tail back down to a random point between minAllocs and maxAllocs.
		if len(allocs) < minAllocs || len(allocs) < maxAllocs {
			continue
		}

		//rand.Shuffle(len(allocs), func(i, j int) { allocs[i], allocs[j] = allocs[j], allocs[i] })
		max := randomRange(minAllocs, maxAllocs)
		//max := maxAllocs
		totalFrees += len(allocs) - max
		for x := max; x < len(allocs); x++ {
			alloc := allocs[x]
			Free(alloc.ptr)
			allocSize -= alloc.size
		}
		allocs = allocs[:max]
	}

	elapsed := time.Since(start)
	seconds := float64(elapsed) / float64(time.Second)
	println("total time     ", elapsed.String())
	fmt.Printf("allocs per sec  %.1f million / sec\n", float64(totalAllocs)/seconds/1000000)
	//println("allocs per sec ", float64(totalAllocs) / seconds)
	println("alloc bytes    ", allocSize)
	println("alloc count    ", len(allocs))
	println("total allocs   ", totalAllocs)
	println("total frees    ", totalFrees)
	//println("memory pages   ", allocator.Pages)
	//println("heap size      ", allocator.HeapSize)
	//println("free size      ", allocator.FreeSize)
	//println("alloc size     ", allocator.AllocSize)
	//println("alloc size     ", AllocSize)
	println("max allocs     ", maxAllocCount)
	//println("max alloc size ", allocator.PeakAllocSize)
	//println("fragmentation  ", fmt.Sprintf("%.2f", allocator.Stats.Fragmentation()))
}

func randomRange(min, max int) int {
	return rand.Intn(max-min) + min
}
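// Illustrative sketch, not part of the original file: a much smaller thrash
// run than Test_AllocatorThrash, handy for quick local iteration. The
// parameter values are arbitrary; it relies only on thrashAllocator and
// randomSize defined above.
func TestAllocatorThrashSmall(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping allocator thrash in -short mode")
	}
	thrashAllocator(true,
		10000, 50, 500, 1000,
		randomSize(0.90, 16, 64),
		randomSize(0.10, 64, 256),
	)
}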
func BenchmarkAllocator_Alloc(b *testing.B) {
	var (
		min, max    = 36, 8092
		runTLSF     = true
		showGCStats = false
	)
	doAfter := func(before, after runtime.MemStats) {
		if showGCStats {
			fmt.Println("Before", "GC CPU", before.GCCPUFraction, "TotalAllocs", before.TotalAlloc, "Frees", before.Frees, "PauseNs Total", before.PauseTotalNs)
			fmt.Println("After ", "GC CPU", after.GCCPUFraction, "TotalAllocs", after.TotalAlloc, "Frees", after.Frees, "PauseNs Total", after.PauseTotalNs)
			println()
		}
	}

	randomRangeSizes := make([]uintptr, 0, 256)
	for i := 0; i < 1000; i++ {
		randomRangeSizes = append(randomRangeSizes, uintptr(randomRange(min, max)))
	}

	for i := 0; i < b.N; i++ {
		size := randomRangeSizes[i%len(randomRangeSizes)]
		b.SetBytes(int64(size))
		Free(Malloc(size))
	}

	if runTLSF {
		b.Run("tlsf malloc", func(b *testing.B) {
			a := tlsf.NewHeap(50)
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.Alloc(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
		b.Run("tlsf sync malloc", func(b *testing.B) {
			a := tlsf.NewHeap(50).ToSync()
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.Alloc(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
		b.Run("tlsf calloc", func(b *testing.B) {
			a := tlsf.NewHeap(50)
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.AllocZeroed(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
	}
	b.Run("rpmalloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(Malloc(size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
	b.Run("rpmalloc zeroed", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(MallocZeroed(size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
	//b.Run("rpmalloc zeroed hybrid", func(b *testing.B) {
	//	runtime.GC()
	//	runtime.GC()
	//	var before runtime.MemStats
	//	runtime.ReadMemStats(&before)
	//	b.ReportAllocs()
	//	b.ResetTimer()
	//	for i := 0; i < b.N; i++ {
	//		size := randomRangeSizes[i%len(randomRangeSizes)]
	//		b.SetBytes(int64(size))
	//		m := Malloc(size)
	//		Zero(unsafe.Pointer(m), size)
	//		Free(m)
	//	}
	//	b.StopTimer()
	//	var after runtime.MemStats
	//	runtime.ReadMemStats(&after)
	//	doAfter(before, after)
	//})
	b.Run("rpmalloc calloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(Calloc(1, size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})

	b.Run("Go GC pool", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			PutBytes(GetBytes(int(size)))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})

	b.Run("Go GC pool zeroed", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			PutBytes(GetBytesZeroed(int(size)))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})

	b.Run("Go GC alloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			_ = make([]byte, 0, size)
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
}
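// Illustrative sketch, not part of the original suite: the sub-benchmarks
// above draw sizes from randomRangeSizes, so per-op cost mixes size classes.
// This fixed-size variant isolates a single 64-byte class for rpmalloc using
// only Malloc, Free and InitThread from this package.
func BenchmarkAllocator_AllocFixed64(b *testing.B) {
	InitThread()
	b.SetBytes(64)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Free(Malloc(64))
	}
}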
var (
	pool1 = &sync.Pool{New: func() interface{} {
		return make([]byte, 1)
	}}
	pool2 = &sync.Pool{New: func() interface{} {
		return make([]byte, 2)
	}}
	pool4 = &sync.Pool{New: func() interface{} {
		return make([]byte, 4)
	}}
	pool8 = &sync.Pool{New: func() interface{} {
		return make([]byte, 8)
	}}
	pool12 = &sync.Pool{New: func() interface{} {
		return make([]byte, 12)
	}}
	pool16 = &sync.Pool{New: func() interface{} {
		return make([]byte, 16)
	}}
	pool24 = &sync.Pool{New: func() interface{} {
		return make([]byte, 24)
	}}
	pool32 = &sync.Pool{New: func() interface{} {
		return make([]byte, 32)
	}}
	pool40 = &sync.Pool{New: func() interface{} {
		return make([]byte, 40)
	}}
	pool48 = &sync.Pool{New: func() interface{} {
		return make([]byte, 48)
	}}
	pool56 = &sync.Pool{New: func() interface{} {
		return make([]byte, 56)
	}}
	pool64 = &sync.Pool{New: func() interface{} {
		return make([]byte, 64)
	}}
	pool72 = &sync.Pool{New: func() interface{} {
		return make([]byte, 72)
	}}
	pool96 = &sync.Pool{New: func() interface{} {
		return make([]byte, 96)
	}}
	pool128 = &sync.Pool{New: func() interface{} {
		return make([]byte, 128)
	}}
	pool192 = &sync.Pool{New: func() interface{} {
		return make([]byte, 192)
	}}
	pool256 = &sync.Pool{New: func() interface{} {
		return make([]byte, 256)
	}}
	pool384 = &sync.Pool{New: func() interface{} {
		return make([]byte, 384)
	}}
	pool512 = &sync.Pool{New: func() interface{} {
		return make([]byte, 512)
	}}
	pool768 = &sync.Pool{New: func() interface{} {
		return make([]byte, 768)
	}}
	pool1024 = &sync.Pool{New: func() interface{} {
		return make([]byte, 1024)
	}}
	pool2048 = &sync.Pool{New: func() interface{} {
		return make([]byte, 2048)
	}}
	pool4096 = &sync.Pool{New: func() interface{} {
		return make([]byte, 4096)
	}}
	pool8192 = &sync.Pool{New: func() interface{} {
		return make([]byte, 8192)
	}}
	pool16384 = &sync.Pool{New: func() interface{} {
		return make([]byte, 16384)
	}}
	pool32768 = &sync.Pool{New: func() interface{} {
		return make([]byte, 32768)
	}}
	pool65536 = &sync.Pool{New: func() interface{} {
		return make([]byte, 65536)
	}}
)
// Zero clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
// implementation of typedmemclr and memclrHasPointers. See the doc of
// Memmove for more details.
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//
//go:noescape
//go:linkname Zero runtime.memclrNoHeapPointers
func Zero(ptr unsafe.Pointer, n uintptr)

// GetBytesZeroed returns a pooled slice of length n with its entire backing
// capacity cleared.
func GetBytesZeroed(n int) []byte {
	b := GetBytes(n)
	if c := cap(b); c > 0 {
		// Reslice to capacity before taking the address so that a zero-length
		// request cannot panic on &b[0].
		Zero(unsafe.Pointer(&b[:c][0]), uintptr(c))
	}
	return b
}

// GetBytes returns a slice of length n backed by the nearest size-class pool,
// falling back to make for sizes above 65536.
func GetBytes(n int) []byte {
	v := ceilToPowerOfTwo(n)
	switch v {
	case 0, 1:
		return pool1.Get().([]byte)[:n]
	case 2:
		return pool2.Get().([]byte)[:n]
	case 4:
		return pool4.Get().([]byte)[:n]
	case 8:
		return pool8.Get().([]byte)[:n]
	case 16:
		return pool16.Get().([]byte)[:n]
	case 24:
		return pool24.Get().([]byte)[:n]
	case 32:
		return pool32.Get().([]byte)[:n]
	case 64:
		switch {
		case n < 41:
			return pool40.Get().([]byte)[:n]
		case n < 49:
			return pool48.Get().([]byte)[:n]
		case n < 57:
			return pool56.Get().([]byte)[:n]
		}
		return pool64.Get().([]byte)[:n]
	case 128:
		switch {
		case n < 73:
			return pool72.Get().([]byte)[:n]
		case n < 97:
			return pool96.Get().([]byte)[:n]
		}
		return pool128.Get().([]byte)[:n]
	case 256:
		if n < 193 {
			return pool192.Get().([]byte)[:n]
		}
		return pool256.Get().([]byte)[:n]
	case 512:
		if n <= 384 {
			return pool384.Get().([]byte)[:n]
		}
		return pool512.Get().([]byte)[:n]
	case 1024:
		if n <= 768 {
			return pool768.Get().([]byte)[:n]
		}
		return pool1024.Get().([]byte)[:n]
	case 2048:
		return pool2048.Get().([]byte)[:n]
	case 4096:
		return pool4096.Get().([]byte)[:n]
	case 8192:
		return pool8192.Get().([]byte)[:n]
	case 16384:
		return pool16384.Get().([]byte)[:n]
	case 32768:
		return pool32768.Get().([]byte)[:n]
	case 65536:
		return pool65536.Get().([]byte)[:n]
	}

	return make([]byte, n)
}

// PutBytes returns a slice obtained from GetBytes to its size-class pool.
// Slices whose capacity does not match a pool class are left to the GC.
func PutBytes(b []byte) {
	switch cap(b) {
	case 1:
		pool1.Put(b)
	case 2:
		pool2.Put(b)
	case 4:
		pool4.Put(b)
	case 8:
		pool8.Put(b)
	case 12:
		pool12.Put(b)
	case 16:
		pool16.Put(b)
	case 24:
		pool24.Put(b)
	case 32:
		pool32.Put(b)
	case 40:
		pool40.Put(b)
	case 48:
		pool48.Put(b)
	case 56:
		pool56.Put(b)
	case 64:
		pool64.Put(b)
	case 72:
		pool72.Put(b)
	case 96:
		pool96.Put(b)
	case 128:
		pool128.Put(b)
	case 192:
		pool192.Put(b)
	case 256:
		pool256.Put(b)
	case 384:
		pool384.Put(b)
	case 512:
		pool512.Put(b)
	case 768:
		pool768.Put(b)
	case 1024:
		pool1024.Put(b)
	case 2048:
		pool2048.Put(b)
	case 4096:
		pool4096.Put(b)
	case 8192:
		pool8192.Put(b)
	case 16384:
		pool16384.Put(b)
	case 32768:
		pool32768.Put(b)
	case 65536:
		pool65536.Put(b)
	}
}
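// Illustrative sketch, not part of the original file: verifies that GetBytes
// always returns a slice of the requested length across representative sizes,
// including the make fallback above 65536, and that PutBytes accepts each
// slice back. It assumes only the pool helpers defined above.
func TestGetPutBytesRoundTrip(t *testing.T) {
	for _, n := range []int{1, 2, 7, 16, 24, 33, 48, 100, 200, 384, 1000, 5000, 70000} {
		b := GetBytes(n)
		if len(b) != n {
			t.Fatalf("GetBytes(%d): len = %d, want %d", n, len(b), n)
		}
		PutBytes(b)
	}
}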
const (
	bitsize       = 32 << (^uint(0) >> 63)
	maxint        = int(1<<(bitsize-1) - 1)
	maxintHeadBit = 1 << (bitsize - 2)
)

// LogarithmicRange calls cb for each power of two from min (rounded up to the
// next power of two) through max, inclusive.
func LogarithmicRange(min, max int, cb func(int)) {
	if min == 0 {
		min = 1
	}
	for n := ceilToPowerOfTwo(min); n <= max; n <<= 1 {
		cb(n)
	}
}

// IsPowerOfTwo reports whether the given integer is a power of two.
func IsPowerOfTwo(n int) bool {
	return n&(n-1) == 0
}

// Identity returns n unchanged.
func Identity(n int) int {
	return n
}

// ceilToPowerOfTwo returns the least power of two greater than or equal to n.
func ceilToPowerOfTwo(n int) int {
	if n&maxintHeadBit != 0 && n > maxintHeadBit {
		panic("argument is too large")
	}
	if n <= 2 {
		return n
	}
	n--
	n = fillBits(n)
	n++
	return n
}

// FloorToPowerOfTwo returns the greatest power of two less than or equal to n.
func FloorToPowerOfTwo(n int) int {
	if n <= 2 {
		return n
	}
	n = fillBits(n)
	n >>= 1
	n++
	return n
}

// fillBits sets every bit below the highest set bit of n.
func fillBits(n int) int {
	n |= n >> 1
	n |= n >> 2
	n |= n >> 4
	n |= n >> 8
	n |= n >> 16
	n |= n >> 32
	return n
}
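// Illustrative sketch, not part of the original file: spot-checks the
// power-of-two helpers against hand-computed values.
func TestPowerOfTwoHelpers(t *testing.T) {
	ceilCases := map[int]int{1: 1, 2: 2, 3: 4, 4: 4, 5: 8, 24: 32, 100: 128, 4096: 4096}
	for in, want := range ceilCases {
		if got := ceilToPowerOfTwo(in); got != want {
			t.Fatalf("ceilToPowerOfTwo(%d) = %d, want %d", in, got, want)
		}
	}
	floorCases := map[int]int{1: 1, 2: 2, 3: 2, 4: 4, 5: 4, 100: 64, 4096: 4096}
	for in, want := range floorCases {
		if got := FloorToPowerOfTwo(in); got != want {
			t.Fatalf("FloorToPowerOfTwo(%d) = %d, want %d", in, got, want)
		}
	}
	if !IsPowerOfTwo(64) || IsPowerOfTwo(65) {
		t.Fatal("IsPowerOfTwo misclassified 64 or 65")
	}
}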