github.com/jxskiss/gopkg/v2@v2.14.9-0.20240514120614-899f3e7952b4/perf/bbp/pool.go

package bbp

import (
	"math"
	"sync"
	"sync/atomic"
	"time"
)

const (
	defaultPoolIdx           = 10 // 1024 bytes
	defaultCalibrateCalls    = 10000
	defaultCalibrateInterval = 3 * time.Minute
	defaultResizePercentile  = 95
)

// Pool is a byte buffer pool which reuses byte slices. It uses dynamic
// calibration (just a few atomic operations) to try its best to match
// the workload.
//
// Generally, if the size and capacity are known in advance, you may use
// the exported function Get(length, capacity) to get a properly sized
// byte buffer. However, if the buffer size is uncertain in advance, you may
// want to use this Pool. For different workloads, dedicated Pool instances
// are recommended; the dynamic calibration helps to reduce memory waste.
//
// All Pool instances share the same underlying sized byte slice pools.
// The byte buffers provided by Pool have a minimum limit of 64B and a
// maximum limit of 32MB; byte slices with a size outside this range are
// allocated directly from the operating system and won't be recycled
// for reuse.
//
// The zero value for Pool is ready to use. A Pool value shall not be
// copied after being initialized.
type Pool struct {
	r Recorder

	bp sync.Pool // *Buffer
}

// NewPool creates a new Pool instance using the given Recorder.
//
// In most cases, declaring a Pool variable is sufficient to initialize
// a Pool.
func NewPool(r Recorder) *Pool {
	r.poolIdx = uintptr(r.getDefaultPoolIdx())
	return &Pool{r: r}
}

// Get returns a byte slice buffer from the pool.
// The returned buffer may be put back to the pool for reusing.
func (p *Pool) Get() []byte {
	idx := p.r.getPoolIdx()
	return sizedPools[idx].Get(0)
}

// Put puts back a byte slice buffer to the pool for reusing.
//
// The buf mustn't be touched after returning it to the pool,
// otherwise data races will occur.
func (p *Pool) Put(buf []byte) {
	p.r.Record(len(buf))
	put(buf)
}

// GetBuffer returns a Buffer from the pool with dynamically calibrated
// default capacity.
// The returned Buffer may be put back to the pool for reusing.
func (p *Pool) GetBuffer() *Buffer {
	v := p.bp.Get()
	if v != nil {
		return v.(*Buffer)
	}
	idx := p.r.getPoolIdx()
	buf := sizedPools[idx].Get(0)
	return &Buffer{buf: buf}
}

// PutBuffer puts back a Buffer to the pool for reusing.
//
// The buf mustn't be touched after returning it to the pool,
// otherwise data races will occur.
func (p *Pool) PutBuffer(buf *Buffer) {
	p.r.Record(len(buf.buf))
	if cap(buf.buf) <= maxBufSize {
		buf.Reset()
		p.bp.Put(buf)
	}
}
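
// Below is a minimal usage sketch; the examplePool and examplePoolUsage
// names are hypothetical and used for illustration only. It shows the
// intended flow: a dedicated Pool per workload, buffers taken with Get or
// GetBuffer and returned with Put or PutBuffer once no longer referenced.
var examplePool Pool // the zero value is ready to use; keep one long-lived Pool per workload

func examplePoolUsage(data []byte) {
	// Work with a raw byte slice.
	buf := examplePool.Get()
	buf = append(buf, data...)
	// ... use buf, then return it; it must not be touched afterwards ...
	examplePool.Put(buf)

	// Or work with a *Buffer wrapper; PutBuffer resets it before pooling.
	b := examplePool.GetBuffer()
	// ... use b ...
	examplePool.PutBuffer(b)
}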

// Recorder helps to record the most frequently used buffer size.
// It calibrates the recorded size data while running, thus it can dynamically
// adjust according to the recent workload.
type Recorder struct {

	// DefaultSize optionally configures the initial default size to be used.
	// Default is 1024 bytes.
	DefaultSize int

	// CalibrateInterval optionally configures the interval to do calibrating.
	// Default is 3 minutes.
	CalibrateInterval time.Duration

	// ResizePercentile optionally configures the percentile used to reset the
	// default size when calibrating; the value should be in the range
	// [50, 100). Default is 95.
	ResizePercentile int

	poolIdx uintptr

	calls       [poolSize]int32
	calibrating uintptr
	preNano     int64
	preCalls    int32
}

// Size returns the current most frequently used buffer size.
func (p *Recorder) Size() int {
	return 1 << p.getPoolIdx()
}

// Record records a used buffer size n.
//
// The max recordable size is 32MB; if n is larger than 32MB, it records
// 32MB.
func (p *Recorder) Record(n int) {
	idx := maxPoolIdx
	if n < maxBufSize {
		idx = indexGet(n)
	}
	if atomic.AddInt32(&p.calls[idx], -1) < 0 {
		p.calibrate()
	}
}

func (p *Recorder) getPoolIdx() int {
	idx := int(atomic.LoadUintptr(&p.poolIdx))
	if idx == 0 {
		idx = p.getDefaultPoolIdx()
		atomic.StoreUintptr(&p.poolIdx, uintptr(idx))
	}
	return idx
}

func (p *Recorder) getDefaultPoolIdx() int {
	if p.DefaultSize > 0 {
		return indexGet(p.DefaultSize)
	}
	return defaultPoolIdx
}

func (p *Recorder) getCalibrateInterval() time.Duration {
	if p.CalibrateInterval > 0 {
		return p.CalibrateInterval
	}
	return defaultCalibrateInterval
}

func (p *Recorder) getResizePercentile() int {
	if p.ResizePercentile >= 50 && p.ResizePercentile < 100 {
		return p.ResizePercentile
	}
	return defaultResizePercentile
}

// calibrate recomputes the preferred pool index from the buffer sizes
// recorded since the previous calibration. Only one goroutine calibrates
// at a time; concurrent callers return immediately.
func (p *Recorder) calibrate() {
	if !atomic.CompareAndSwapUintptr(&p.calibrating, 0, 1) {
		return
	}

	preNano := p.preNano
	preCalls := p.preCalls

	// Scale the number of calls to record before the next calibration,
	// targeting roughly one calibration per CalibrateInterval.
	nowNano := time.Now().UnixNano()
	nextCalls := int32(defaultCalibrateCalls)
	if preCalls > 0 {
		interval := p.getCalibrateInterval()
		preInterval := nowNano - preNano
		next := uint64(float64(p.preCalls) * float64(interval) / float64(preInterval))
		if next < defaultCalibrateCalls {
			nextCalls = defaultCalibrateCalls
		} else if next > math.MaxInt32 {
			nextCalls = math.MaxInt32
		} else {
			nextCalls = int32(next)
		}
	}
	p.preNano = nowNano
	p.preCalls = nextCalls

	// Snapshot and reset the per-size counters. When this is not the first
	// calibration, each counter started at preCalls and was decremented once
	// per Record call, so preCalls - c is the record count for that size class.
	var poolIdx int
	var calls [poolSize]int32
	var callsSum int64
	for i := minPoolIdx; i < poolSize; i++ {
		c := atomic.SwapInt32(&p.calls[i], nextCalls)
		if preCalls > 0 {
			c = preCalls - c
			if c < 0 {
				c = preCalls
			}
			calls[i] = c
			callsSum += int64(c)
		}
	}
	// Pick the smallest size class whose cumulative count reaches the
	// configured percentile of all recorded calls.
	if preCalls > 0 {
		pctVal := int64(float64(callsSum) * float64(p.getResizePercentile()) / 100)
		callsSum = 0
		for i := minPoolIdx; i < poolSize; i++ {
			callsSum += int64(calls[i])
			if callsSum >= pctVal {
				poolIdx = i
				break
			}
		}
	}
	// Never resize below the configured (or default) size.
	defaultIdx := p.getDefaultPoolIdx()
	if poolIdx < defaultIdx {
		poolIdx = defaultIdx
	}
	atomic.StoreUintptr(&p.poolIdx, uintptr(poolIdx))

	atomic.StoreUintptr(&p.calibrating, 0)
}
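
// A minimal configuration sketch follows; the exampleConfiguredPool name and
// the chosen values are hypothetical, for illustration only. It shows how
// NewPool with a tuned Recorder overrides the package defaults (1024-byte
// initial size, 3-minute calibrate interval, 95th resize percentile).
func exampleConfiguredPool() *Pool {
	return NewPool(Recorder{
		DefaultSize:       4096,        // start with (and never resize below) 4KB buffers
		CalibrateInterval: time.Minute, // recalibrate roughly once per minute
		ResizePercentile:  90,          // resize to the class covering 90% of recorded sizes
	})
}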