github.com/moontrade/unsafe@v0.9.1/memory/rpmalloc/rpmalloc_test.go

package rpmalloc

import (
	"fmt"
	"math/rand"
	"runtime"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/moontrade/unsafe/memory/tlsf"
)

func TestCall(t *testing.T) {
	Malloc(64)
}

// TestGlobalAlloc exercises Malloc/Free and StdMalloc/StdFree from many
// goroutines locked to OS threads.
func TestGlobalAlloc(t *testing.T) {

	Malloc(128)
	var wg = &sync.WaitGroup{}

	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			runtime.LockOSThread()
			defer wg.Done()

			Free(Malloc(128))
			a := StdMalloc(24)
			StdFree(a)

			StdFree(Malloc(64))
		}()
	}

	wg.Wait()
	a := StdMalloc(24)
	StdFree(a)

	//time.Sleep(time.Hour)
	//directPtr := AllocDirect(32)
	//FreeDirect(directPtr)
	//HookDirect()
	//Hook()
	//InitThread()
	//a = Malloc(24)
	//println("usable size for", 24, uint(UsableSize(a)))
	//Free(a)
	//
	//a, c := MallocCap(24)
	//println("size", 24, "cap", c)
	//Free(a)
	//
	//a = Malloc(32)
	//println("usable size for", 32, uint(UsableSize(a)))
	//Free(a)

	//for i := 0; i < 100; i++ {
	//	go func() {
	//		InitThread()
	//		Free(Malloc(32))
	//	}()
	//}
}
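
// Illustrative sketch, not part of the original file: a minimal test around
// MallocCap, which BenchmarkCGO below also uses and which returns the pointer
// together with the usable capacity reserved for the request. The test name
// TestMallocCapSketch is hypothetical.
func TestMallocCapSketch(t *testing.T) {
	ptr, c := MallocCap(24)
	if ptr == 0 {
		t.Fatal("MallocCap returned a nil pointer")
	}
	t.Log("requested 24 bytes, usable capacity", c)
	Free(ptr)
}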

func BenchmarkCGO(b *testing.B) {
	//b.Run("cgo", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		Stub()
	//	}
	//})
	//
	//b.Run("direct", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		StubDirect()
	//	}
	//})
	//
	//b.Run("alloc/free direct", func(b *testing.B) {
	//	b.ResetTimer()
	//	b.ReportAllocs()
	//	for i := 0; i < b.N; i++ {
	//		FreeDirect(AllocDirect(32))
	//	}
	//})

	b.Run("malloc/free cgo", func(b *testing.B) {
		InitThread()
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			Free(Malloc(32))
		}
	})

	b.Run("malloc_cap/free cgo", func(b *testing.B) {
		InitThread()
		var (
			ptr, c uintptr
		)
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			ptr, c = MallocCap(8)
			_ = c
			Free(ptr)
		}
	})

	b.Run("calloc/free cgo", func(b *testing.B) {
		InitThread()
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			Free(Calloc(1, 32))
		}
	})

	b.Run("tlsf alloc/free", func(b *testing.B) {
		a := tlsf.NewHeap(1)
		b.ResetTimer()
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			a.Free(a.Alloc(32))
		}
	})

	//b.Run("alloc direct 32", func(b *testing.B) {
	//	for i := 0; i < b.N; i++ {
	//		AllocDirect32()
	//	}
	//})
}

func Test_AllocatorThrash(t *testing.T) {
	statsBefore := runtime.MemStats{}
	runtime.ReadMemStats(&statsBefore)
	thrashAllocator(false,
		1000000, 100, 15000, 21000,
		randomSize(0.95, 16, 48),
		randomSize(0.95, 48, 192),
		randomSize(0.55, 64, 512),
		//randomSize(0.70, 128, 512),
		//randomSize(0.15, 128, 512),
		//randomSize(0.30, 128, 1024),
	)

	var stats ThreadStats
	ReadThreadStats(&stats)

	var globalStats GlobalStats
	ReadGlobalStats(&globalStats)

	var statsAfter runtime.MemStats
	runtime.ReadMemStats(&statsAfter)
	//fmt.Println("SysAllocator Size", a.Size())

	fmt.Println("MemStats Before", statsBefore)
	fmt.Println("MemStats After", statsAfter)

	//thrashAllocator(newAllocator(2), 100000, 100, 12000, 17000,
	//	randomSize(0.80, 24, 96),
	//	//randomSize(0.70, 128, 512),
	//	//randomSize(0.15, 128, 512),
	//	//randomSize(0.30, 128, 1024),
	//)
}

type sizeClass struct {
	pct      float64
	min, max int
	next     func() int
}

func randomSize(pct float64, min, max int) *sizeClass {
	sz := &sizeClass{pct, min, max, nil}
	sz.next = sz.nextRandom
	return sz
}

func (s *sizeClass) nextRandom() int {
	if s.max == s.min {
		return s.max
	}
	return rand.Intn(s.max-s.min) + s.min
}

func thrashAllocator(
	shuffle bool,
	iterations, allocsPerIteration, minAllocs, maxAllocs int,
	sizeClasses ...*sizeClass,
) {
	type allocation struct {
		ptr  uintptr
		size int
	}

	sz := make([]int, 0, allocsPerIteration)
	for _, sc := range sizeClasses {
		for i := 0; i < int(float64(allocsPerIteration)*sc.pct); i++ {
			sz = append(sz, sc.next())
		}
	}

	allocs := make([]allocation, 0, maxAllocs)
	allocSize := 0
	totalAllocs := 0
	totalFrees := 0
	maxAllocCount := 0
	maxAllocSize := 0

	if shuffle {
		rand.Seed(time.Now().UnixNano())
	}

	start := time.Now()
	for i := 0; i < iterations; i++ {
		if shuffle {
			rand.Shuffle(len(sz), func(i, j int) { sz[i], sz[j] = sz[j], sz[i] })
		}

		for _, size := range sz {
			allocs = append(allocs, allocation{
				ptr:  Malloc(uintptr(size)), //tlsfalloc(uintptr(size)),
				size: size,
			})
			allocSize += size
		}
		totalAllocs += len(sz)

		if maxAllocCount < len(allocs) {
			maxAllocCount = len(allocs)
		}
		if allocSize > maxAllocSize {
			maxAllocSize = allocSize
		}

		// Keep allocating until at least maxAllocs allocations are outstanding
		// (with minAllocs <= maxAllocs the first clause is redundant).
		if len(allocs) < minAllocs || len(allocs) < maxAllocs {
			continue
		}

		//rand.Shuffle(len(allocs), func(i, j int) { allocs[i], allocs[j] = allocs[j], allocs[i] })
		max := randomRange(minAllocs, maxAllocs)
		//max := maxAllocs
		totalFrees += len(allocs) - max
		for x := max; x < len(allocs); x++ {
			alloc := allocs[x]
			Free(alloc.ptr)
			allocSize -= alloc.size
		}
		allocs = allocs[:max]
	}

	elapsed := time.Since(start)
	seconds := float64(elapsed) / float64(time.Second)
	println("total time			", elapsed.String())
	fmt.Printf("allocs per sec		 %.1f million / sec\n", float64(totalAllocs)/seconds/1000000)
	//println("allocs per sec		", float64(totalAllocs) / seconds)
	println("alloc bytes			", allocSize)
	println("alloc count			", len(allocs))
	println("total allocs		", totalAllocs)
	println("total frees			", totalFrees)
	//println("memory pages		", allocator.Pages)
	//println("heap size			", allocator.HeapSize)
	//println("free size			", allocator.FreeSize)
	//println("alloc size			", allocator.AllocSize)
	//println("alloc size			", AllocSize)
	println("max allocs			", maxAllocCount)
	//println("max alloc size		", allocator.PeakAllocSize)
	//println("fragmentation		", fmt.Sprintf("%.2f", allocator.Stats.Fragmentation()))
}
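
// Illustrative sketch, not part of the original file: a much smaller thrash
// run than Test_AllocatorThrash above, useful as a quick smoke test of the
// same (shuffle, iterations, allocsPerIteration, minAllocs, maxAllocs,
// sizeClasses...) parameters. The test name is hypothetical.
func TestAllocatorThrashSmokeSketch(t *testing.T) {
	thrashAllocator(true,
		1000, 50, 200, 400,
		randomSize(0.90, 16, 64),
		randomSize(0.10, 64, 256),
	)
}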

func randomRange(min, max int) int {
	return rand.Intn(max-min) + min
}

func BenchmarkAllocator_Alloc(b *testing.B) {
	var (
		//min, max    = 36, 8092
		min, max    = 16, 2048
		runTLSF     = true
		showGCStats = false
	)
	doAfter := func(before, after runtime.MemStats) {
		if showGCStats {
			fmt.Println("Before", "GC CPU", before.GCCPUFraction, "TotalAllocs", before.TotalAlloc, "Frees", before.Frees, "PauseNs Total", before.PauseTotalNs)
			fmt.Println("After ", "GC CPU", after.GCCPUFraction, "TotalAllocs", after.TotalAlloc, "Frees", after.Frees, "PauseNs Total", after.PauseTotalNs)
			println()
		}
	}

	randomRangeSizes := make([]uintptr, 0, 256)
	for i := 0; i < 1000; i++ {
		randomRangeSizes = append(randomRangeSizes, uintptr(randomRange(min, max)))
	}

	for i := 0; i < b.N; i++ {
		size := randomRangeSizes[i%len(randomRangeSizes)]
		b.SetBytes(int64(size))
		p := Malloc(size)
		Free(p)
	}

	if runTLSF {
		b.Run("tlsf malloc", func(b *testing.B) {
			a := tlsf.NewHeap(50)
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.Alloc(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
		b.Run("tlsf sync malloc", func(b *testing.B) {
			a := tlsf.NewHeap(50).ToSync()
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.Alloc(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
		b.Run("tlsf calloc", func(b *testing.B) {
			a := tlsf.NewHeap(50)
			runtime.GC()
			runtime.GC()
			var before runtime.MemStats
			runtime.ReadMemStats(&before)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				size := randomRangeSizes[i%len(randomRangeSizes)]
				b.SetBytes(int64(size))
				a.Free(a.AllocZeroed(size))
			}
			b.StopTimer()
			var after runtime.MemStats
			runtime.ReadMemStats(&after)
			doAfter(before, after)
		})
	}
	b.Run("rpmalloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(Malloc(size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
	b.Run("rpmalloc zeroed", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(MallocZeroed(size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
	//b.Run("rpmalloc zeroed hybrid", func(b *testing.B) {
	//	runtime.GC()
	//	runtime.GC()
	//	var before runtime.MemStats
	//	runtime.ReadMemStats(&before)
	//	b.ReportAllocs()
	//	b.ResetTimer()
	//	for i := 0; i < b.N; i++ {
	//		size := randomRangeSizes[i%len(randomRangeSizes)]
	//		b.SetBytes(int64(size))
	//		m := Malloc(size)
	//		Zero(unsafe.Pointer(m), size)
	//		Free(m)
	//	}
	//	b.StopTimer()
	//	var after runtime.MemStats
	//	runtime.ReadMemStats(&after)
	//	doAfter(before, after)
	//})
	b.Run("rpmalloc calloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			Free(Calloc(1, size))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		//doAfter(before, after)
	})

	b.Run("Go GC pool", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			PutBytes(GetBytes(int(size)))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})

	b.Run("Go GC pool zeroed", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			PutBytes(GetBytesZeroed(int(size)))
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})

	b.Run("Go GC alloc", func(b *testing.B) {
		runtime.GC()
		runtime.GC()
		var before runtime.MemStats
		runtime.ReadMemStats(&before)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			size := randomRangeSizes[i%len(randomRangeSizes)]
			b.SetBytes(int64(size))
			_ = make([]byte, 0, size)
		}
		b.StopTimer()
		var after runtime.MemStats
		runtime.ReadMemStats(&after)
		doAfter(before, after)
	})
}

// Fixed-capacity []byte pools keyed by size class, used by GetBytes/PutBytes below.
var (
	pool1 = &sync.Pool{New: func() interface{} {
		return make([]byte, 1)
	}}
	pool2 = &sync.Pool{New: func() interface{} {
		return make([]byte, 2)
	}}
	pool4 = &sync.Pool{New: func() interface{} {
		return make([]byte, 4)
	}}
	pool8 = &sync.Pool{New: func() interface{} {
		return make([]byte, 8)
	}}
	pool12 = &sync.Pool{New: func() interface{} {
		return make([]byte, 12)
	}}
	pool16 = &sync.Pool{New: func() interface{} {
		return make([]byte, 16)
	}}
	pool24 = &sync.Pool{New: func() interface{} {
		return make([]byte, 24)
	}}
	pool32 = &sync.Pool{New: func() interface{} {
		return make([]byte, 32)
	}}
	pool40 = &sync.Pool{New: func() interface{} {
		return make([]byte, 40)
	}}
	pool48 = &sync.Pool{New: func() interface{} {
		return make([]byte, 48)
	}}
	pool56 = &sync.Pool{New: func() interface{} {
		return make([]byte, 56)
	}}
	pool64 = &sync.Pool{New: func() interface{} {
		return make([]byte, 64)
	}}
	pool72 = &sync.Pool{New: func() interface{} {
		return make([]byte, 72)
	}}
	pool96 = &sync.Pool{New: func() interface{} {
		return make([]byte, 96)
	}}
	pool128 = &sync.Pool{New: func() interface{} {
		return make([]byte, 128)
	}}
	pool192 = &sync.Pool{New: func() interface{} {
		return make([]byte, 192)
	}}
	pool256 = &sync.Pool{New: func() interface{} {
		return make([]byte, 256)
	}}
	pool384 = &sync.Pool{New: func() interface{} {
		return make([]byte, 384)
	}}
	pool512 = &sync.Pool{New: func() interface{} {
		return make([]byte, 512)
	}}
	pool768 = &sync.Pool{New: func() interface{} {
		return make([]byte, 768)
	}}
	pool1024 = &sync.Pool{New: func() interface{} {
		return make([]byte, 1024)
	}}
	pool2048 = &sync.Pool{New: func() interface{} {
		return make([]byte, 2048)
	}}
	pool4096 = &sync.Pool{New: func() interface{} {
		return make([]byte, 4096)
	}}
	pool8192 = &sync.Pool{New: func() interface{} {
		return make([]byte, 8192)
	}}
	pool16384 = &sync.Pool{New: func() interface{} {
		return make([]byte, 16384)
	}}
	pool32768 = &sync.Pool{New: func() interface{} {
		return make([]byte, 32768)
	}}
	pool65536 = &sync.Pool{New: func() interface{} {
		return make([]byte, 65536)
	}}
)

// Zero clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
// implementation of typedmemclr and memclrHasPointers. See the doc of
// Memmove for more details.
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//
//go:noescape
//go:linkname Zero runtime.memclrNoHeapPointers
func Zero(ptr unsafe.Pointer, n uintptr)

func GetBytesZeroed(n int) []byte {
	b := GetBytes(n)
	// Guard: &b[0] would panic for a zero-length request.
	if len(b) == 0 {
		return b
	}
	Zero(unsafe.Pointer(&b[0]), uintptr(cap(b)))
	return b
}

func GetBytes(n int) []byte {
	v := ceilToPowerOfTwo(n)
	switch v {
	case 0, 1:
		return pool1.Get().([]byte)[:n]
	case 2:
		return pool2.Get().([]byte)[:n]
	case 4:
		return pool4.Get().([]byte)[:n]
	case 8:
		return pool8.Get().([]byte)[:n]
	case 16:
		return pool16.Get().([]byte)[:n]
	case 24:
		// Unreachable: ceilToPowerOfTwo only returns powers of two.
		return pool24.Get().([]byte)[:n]
	case 32:
		return pool32.Get().([]byte)[:n]
	case 64:
		switch {
		case n < 41:
			return pool40.Get().([]byte)[:n]
		case n < 49:
			return pool48.Get().([]byte)[:n]
		case n < 57:
			return pool56.Get().([]byte)[:n]
		}
		return pool64.Get().([]byte)[:n]
	case 128:
		switch {
		case n < 73:
			return pool72.Get().([]byte)[:n]
		case n < 97:
			return pool96.Get().([]byte)[:n]
		}
		return pool128.Get().([]byte)[:n]
	case 256:
		switch {
		case n < 193:
			return pool192.Get().([]byte)[:n]
		}
		return pool256.Get().([]byte)[:n]
	case 512:
		if n <= 384 {
			return pool384.Get().([]byte)[:n]
		}
		return pool512.Get().([]byte)[:n]
	case 1024:
		if n <= 768 {
			return pool768.Get().([]byte)[:n]
		}
		return pool1024.Get().([]byte)[:n]
	case 2048:
		return pool2048.Get().([]byte)[:n]
	case 4096:
		return pool4096.Get().([]byte)[:n]
	case 8192:
		return pool8192.Get().([]byte)[:n]
	case 16384:
		return pool16384.Get().([]byte)[:n]
	case 32768:
		return pool32768.Get().([]byte)[:n]
	case 65536:
		return pool65536.Get().([]byte)[:n]
	}

	return make([]byte, n)
}
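
// Illustrative sketch, not part of the original file: demonstrates the
// GetBytes/PutBytes round trip and the size-class rounding performed above
// (a 100-byte request is served from the 128-byte pool). The test name is
// hypothetical.
func TestGetBytesRoundingSketch(t *testing.T) {
	b := GetBytes(100)
	if len(b) != 100 {
		t.Fatalf("expected len 100, got %d", len(b))
	}
	if cap(b) != 128 {
		t.Fatalf("expected the 128-byte pool, got cap %d", cap(b))
	}
	PutBytes(b)
}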

// PutBytes returns b to the pool matching its capacity; capacities without a
// matching pool are simply dropped for the GC to reclaim.
func PutBytes(b []byte) {
	switch cap(b) {
	case 1:
		pool1.Put(b)
	case 2:
		pool2.Put(b)
	case 4:
		pool4.Put(b)
	case 8:
		pool8.Put(b)
	case 12:
		pool12.Put(b)
	case 16:
		pool16.Put(b)
	case 24:
		pool24.Put(b)
	case 32:
		pool32.Put(b)
	case 40:
		pool40.Put(b)
	case 48:
		pool48.Put(b)
	case 56:
		pool56.Put(b)
	case 64:
		pool64.Put(b)
	case 72:
		pool72.Put(b)
	case 96:
		pool96.Put(b)
	case 128:
		pool128.Put(b)
	case 192:
		pool192.Put(b)
	case 256:
		pool256.Put(b)
	case 384:
		pool384.Put(b)
	case 512:
		pool512.Put(b)
	case 768:
		pool768.Put(b)
	case 1024:
		pool1024.Put(b)
	case 2048:
		pool2048.Put(b)
	case 4096:
		pool4096.Put(b)
	case 8192:
		pool8192.Put(b)
	case 16384:
		pool16384.Put(b)
	case 32768:
		pool32768.Put(b)
	case 65536:
		pool65536.Put(b)
	}
}

const (
	bitsize       = 32 << (^uint(0) >> 63)
	maxint        = int(1<<(bitsize-1) - 1)
	maxintHeadBit = 1 << (bitsize - 2)
)

// LogarithmicRange iterates over the power-of-two sizes from min (rounded up
// to a power of two) through max, calling cb for each size.
func LogarithmicRange(min, max int, cb func(int)) {
	if min == 0 {
		min = 1
	}
	for n := ceilToPowerOfTwo(min); n <= max; n <<= 1 {
		cb(n)
	}
}
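
// Illustrative sketch, not part of the original file: LogarithmicRange visits
// each power-of-two size between min and max, which is how a table of
// size-class pools like the ones above could be generated.
func ExampleLogarithmicRange() {
	var sizes []int
	LogarithmicRange(16, 128, func(n int) {
		sizes = append(sizes, n)
	})
	fmt.Println(sizes)
	// Output: [16 32 64 128]
}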

// IsPowerOfTwo reports whether the given integer is a power of two.
func IsPowerOfTwo(n int) bool {
	return n&(n-1) == 0
}

// Identity returns n unchanged.
func Identity(n int) int {
	return n
}

// ceilToPowerOfTwo returns the least power of two integer value greater than
// or equal to n.
func ceilToPowerOfTwo(n int) int {
	if n&maxintHeadBit != 0 && n > maxintHeadBit {
		panic("argument is too large")
	}
	if n <= 2 {
		return n
	}
	n--
	n = fillBits(n)
	n++
	return n
}
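
// Illustrative sketch, not part of the original file: spot-checks
// ceilToPowerOfTwo, which fills every bit below the highest set bit of n-1
// (fillBits) and then adds one to land on the next power of two. The test
// name is hypothetical.
func TestCeilToPowerOfTwoSketch(t *testing.T) {
	cases := map[int]int{0: 0, 1: 1, 2: 2, 3: 4, 5: 8, 17: 32, 1000: 1024, 1024: 1024}
	for in, want := range cases {
		if got := ceilToPowerOfTwo(in); got != want {
			t.Fatalf("ceilToPowerOfTwo(%d) = %d, want %d", in, got, want)
		}
	}
}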

// FloorToPowerOfTwo returns the greatest power of two integer value less than
// or equal to n.
func FloorToPowerOfTwo(n int) int {
	if n <= 2 {
		return n
	}
	n = fillBits(n)
	n >>= 1
	n++
	return n
}

func fillBits(n int) int {
	n |= n >> 1
	n |= n >> 2
	n |= n >> 4
	n |= n >> 8
	n |= n >> 16
	n |= n >> 32
	return n
}
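
// Illustrative sketch, not part of the original file: the floor counterpart
// fills the low bits and shifts right once, so every value between two powers
// of two maps down to the lower one. The test name is hypothetical.
func TestFloorToPowerOfTwoSketch(t *testing.T) {
	cases := map[int]int{2: 2, 3: 2, 4: 4, 7: 4, 9: 8, 1023: 512, 1024: 1024}
	for in, want := range cases {
		if got := FloorToPowerOfTwo(in); got != want {
			t.Fatalf("FloorToPowerOfTwo(%d) = %d, want %d", in, got, want)
		}
	}
}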