github.com/angenalZZZ/gofunc@v0.0.0-20210507121333-48ff1be3917b/net/pool/ringbuffer/ringbuffer.go

package ringbuffer

import (
	"sort"
	"sync"
	"sync/atomic"

	"github.com/angenalZZZ/gofunc/net/ringbuffer"
)

const (
	minBitSize = 6 // 2**6 = 64 bytes, a typical CPU cache line size
	steps      = 20

	minSize = 1 << minBitSize
	// maxSize = 1 << (minBitSize + steps - 1)

	calibrateCallsThreshold = 42000
	maxPercentile           = 0.95
)

// RingBuffer is an alias of ringbuffer.RingBuffer.
type RingBuffer = ringbuffer.RingBuffer

// Pool represents a ring buffer pool.
//
// Distinct pools may be used for distinct kinds of buffers.
// Giving each buffer type its own, properly sized pool helps reduce
// memory waste.
type Pool struct {
	calls       [steps]uint64
	calibrating uint64

	defaultSize uint64
	maxSize     uint64

	pool sync.Pool
}

var defaultPool Pool

// Get returns an empty ring buffer from the default pool.
//
// The returned buffer may be handed back via Put, which reduces the number
// of memory allocations required for buffer management.
func Get() *RingBuffer { return defaultPool.Get() }

// Get returns an empty ring buffer from the pool, allocating a new one of
// the calibrated default size when the pool has none available.
//
// The buffer may be returned via Put after use in order to minimize GC
// overhead.
func (p *Pool) Get() *ringbuffer.RingBuffer {
	v := p.pool.Get()
	if v != nil {
		return v.(*RingBuffer)
	}
	return ringbuffer.New(int(atomic.LoadUint64(&p.defaultSize)))
}

// Put returns the ring buffer to the default pool.
//
// The buffer mustn't be touched after it has been returned to the pool;
// otherwise data races will occur.
func Put(b *RingBuffer) { defaultPool.Put(b) }
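// Usage sketch (illustrative comment only, assuming this package is imported
// under the hypothetical name bufpool): a caller borrows a buffer, uses it,
// and hands it back so the pool can keep calibrating its default and maximum
// sizes.
//
//	rb := bufpool.Get()
//	defer bufpool.Put(rb) // Put resets the buffer before pooling it
//	// ... fill and drain rb via the ringbuffer.RingBuffer API ...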
// Put releases the ring buffer obtained via Get back to the pool.
//
// The buffer mustn't be accessed after it has been returned to the pool.
func (p *Pool) Put(b *RingBuffer) {
	idx := index(b.Len())

	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
		p.calibrate()
	}

	// Drop buffers that grew beyond the calibrated maximum so the pool
	// doesn't pin oversized allocations.
	maxSize := int(atomic.LoadUint64(&p.maxSize))
	if maxSize == 0 || b.Cap() <= maxSize {
		b.Reset()
		p.pool.Put(b)
	}
}

// calibrate recomputes defaultSize and maxSize from the sizes recorded by
// recent Put calls. Only one goroutine calibrates at a time; concurrent
// callers return immediately.
func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	// Snapshot and reset the per-bucket call counters.
	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	sort.Sort(a)

	// The most frequently used size becomes the default allocation size.
	defaultSize := a[0].size
	maxSize := defaultSize

	// Grow maxSize until the retained buckets cover maxPercentile of calls.
	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	atomic.StoreUint64(&p.calibrating, 0)
}

// callSize records how many Put calls fell into a given size bucket.
type callSize struct {
	calls uint64
	size  uint64
}

// callSizes implements sort.Interface, ordering buckets by descending call count.
type callSizes []callSize

func (ci callSizes) Len() int {
	return len(ci)
}

func (ci callSizes) Less(i, j int) bool {
	return ci[i].calls > ci[j].calls
}

func (ci callSizes) Swap(i, j int) {
	ci[i], ci[j] = ci[j], ci[i]
}

// index maps a buffer length to its size-class bucket, clamping to the last
// bucket for lengths beyond the largest tracked size.
func index(n int) int {
	n--
	n >>= minBitSize
	idx := 0
	for n > 0 {
		n >>= 1
		idx++
	}
	if idx >= steps {
		idx = steps - 1
	}
	return idx
}
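// Worked example (derived from index above, not part of the original file):
// lengths are bucketed by powers of two above the 64-byte minimum, so
//
//	index(1)   == 0 // 1..64 bytes     -> bucket 0
//	index(64)  == 0
//	index(65)  == 1 // 65..128 bytes   -> bucket 1
//	index(129) == 2 // 129..256 bytes  -> bucket 2
//
// and anything past the largest tracked size falls into bucket steps-1.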