github.com/pawelgaczynski/gain@v0.4.0-alpha.0.20230821120126-41f1e60a18da/pkg/pool/ringbuffer/ringbuffer.go (about)

     1  // Copyright (c) 2023 Paweł Gaczyński
     2  // Copyright (c) 2019 Andy Pan
     3  // Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
     4  //
     5  // Licensed under the Apache License, Version 2.0 (the "License");
     6  // you may not use this file except in compliance with the License.
     7  // You may obtain a copy of the License at
     8  //
     9  //	http://www.apache.org/licenses/LICENSE-2.0
    10  //
    11  // Unless required by applicable law or agreed to in writing, software
    12  // distributed under the License is distributed on an "AS IS" BASIS,
    13  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14  // See the License for the specific language governing permissions and
    15  // limitations under the License.
    16  //
    17  // Use of this source code is governed by a MIT license that can be found
    18  // at https://github.com/valyala/bytebufferpool/blob/master/LICENSE
    19  package ringbuffer
    20  
    21  import (
    22  	"sort"
    23  	"sync/atomic"
    24  
    25  	"github.com/pawelgaczynski/gain/pkg/buffer/magicring"
    26  	"github.com/pawelgaczynski/gain/pkg/pool/sync"
    27  )
    28  
const (
	// minBitSize determines the smallest pooled buffer size:
	// 2**12 = 4096 bytes (a typical memory page).
	// NOTE(review): the previous "2**6=64 is a CPU cache line size"
	// comment described the upstream bytebufferpool value of 6,
	// not the 12 used here.
	minBitSize = 12
	// steps is the number of size classes tracked by the pool;
	// the largest class is minSize << (steps-1) = 64 MiB.
	steps = 15

	// minSize is the capacity of the smallest size class.
	minSize = 1 << minBitSize

	// calibrateCallsThreshold is how many Put calls recorded for a
	// single size class trigger a recalibration of defaultSize/maxSize.
	calibrateCallsThreshold = 42000
	// maxPercentile caps maxSize at the size class that covers 95%
	// of all observed Put calls.
	maxPercentile = 0.95
)
    38  
// RingBuffer is the alias of magicring.RingBuffer.
type RingBuffer = magicring.RingBuffer
    41  
// Pool represents ring-buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help to reduce
// memory waste.
type Pool struct {
	// calls counts, per size class, how many buffers of that class were
	// returned via Put since the last calibration. Accessed atomically.
	calls [steps]uint64
	// calibrating is a CAS guard (0 or 1) ensuring only one goroutine
	// runs calibrate at a time.
	calibrating uint64

	// defaultSize is the capacity used when allocating a new buffer
	// because the pool is empty. Accessed atomically.
	defaultSize uint64
	// maxSize is the largest buffer capacity accepted back into the pool
	// (0 means no limit has been calibrated yet). Accessed atomically.
	maxSize uint64

	// pool holds the reusable ring buffers.
	pool sync.Pool[*RingBuffer]
}
    56  
// builtinPool backs the package-level Get and Put helpers.
var builtinPool = NewRingBufferPool()
    58  
    59  // Get returns an empty byte buffer from the pool.
    60  //
    61  // Got byte buffer may be returned to the pool via Put call.
    62  // This reduces the number of memory allocations required for byte buffer
    63  // management.
    64  func Get() *RingBuffer { return builtinPool.Get() }
    65  
    66  // Get returns new byte buffer with zero length.
    67  //
    68  // The byte buffer may be returned to the pool via Put after the use
    69  // in order to minimize GC overhead.
    70  func (p *Pool) Get() *RingBuffer {
    71  	v := p.pool.Get()
    72  	if v != nil {
    73  		return v
    74  	}
    75  
    76  	buffer := magicring.NewMagicBuffer(int(atomic.LoadUint64(&p.defaultSize)))
    77  
    78  	return buffer
    79  }
    80  
    81  // Put returns byte buffer to the pool.
    82  //
    83  // RingBuffer mustn't be touched after returning it to the pool,
    84  // otherwise, data races will occur.
    85  func Put(b *RingBuffer) {
    86  	b.Zeroes()
    87  	builtinPool.Put(b)
    88  }
    89  
    90  // Put releases byte buffer obtained via Get to the pool.
    91  //
    92  // The buffer mustn't be accessed after returning to the pool.
    93  func (p *Pool) Put(buffer *RingBuffer) {
    94  	idx := indexRingBufferPool(buffer.Cap())
    95  	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
    96  		p.calibrate()
    97  	}
    98  
    99  	maxSize := int(atomic.LoadUint64(&p.maxSize))
   100  	if maxSize == 0 || buffer.Cap() <= maxSize {
   101  		buffer.Reset()
   102  		p.pool.Put(buffer)
   103  	}
   104  }
   105  
// calibrate recomputes defaultSize and maxSize from the per-size-class
// call counters gathered since the previous calibration. Only one
// goroutine calibrates at a time; concurrent callers return immediately.
func (p *Pool) calibrate() {
	// CAS guard: bail out if another calibration is already in flight.
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	callData := make(callSizes, 0, steps)

	var callsSum uint64

	// Snapshot and reset every size-class counter atomically.
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		callData = append(callData, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	// Order size classes by popularity, most-used first.
	sort.Sort(callData)

	// The most frequently observed size becomes the default allocation
	// size for new buffers.
	defaultSize := callData[0].size
	maxSize := defaultSize

	// Accept back into the pool only buffers no larger than the size
	// class covering maxPercentile (95%) of all observed calls.
	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0

	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += callData[i].calls

		size := callData[i].size
		if size > maxSize {
			maxSize = size
		}
	}
	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	// Release the guard so a future threshold crossing can recalibrate.
	atomic.StoreUint64(&p.calibrating, 0)
}
   147  
   148  func NewRingBufferPool() Pool {
   149  	p := Pool{
   150  		pool: sync.NewPool[*RingBuffer](),
   151  	}
   152  	atomic.StoreUint64(&p.defaultSize, uint64(magicring.DefaultMagicBufferSize))
   153  
   154  	return p
   155  }
   156  
   157  type callSize struct {
   158  	calls uint64
   159  	size  uint64
   160  }
   161  
   162  type callSizes []callSize
   163  
   164  func (ci callSizes) Len() int {
   165  	return len(ci)
   166  }
   167  
   168  func (ci callSizes) Less(i, j int) bool {
   169  	return ci[i].calls > ci[j].calls
   170  }
   171  
   172  func (ci callSizes) Swap(i, j int) {
   173  	ci[i], ci[j] = ci[j], ci[i]
   174  }
   175  
   176  func indexRingBufferPool(n int) int {
   177  	n--
   178  	n >>= minBitSize
   179  	idx := 0
   180  
   181  	for n > 0 {
   182  		n >>= 1
   183  		idx++
   184  	}
   185  
   186  	if idx >= steps {
   187  		idx = steps - 1
   188  	}
   189  
   190  	return idx
   191  }