github.com/cockroachdb/pebble@v1.1.2/sstable/buffer_pool.go

// Copyright 2023 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package sstable

import (
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/internal/cache"
)

// A bufferHandle is a handle to manually-managed memory. The handle may point
// to a block in the block cache (h.Get() != nil), or a buffer that exists
// outside the block cache, allocated from a BufferPool (b.Valid()).
type bufferHandle struct {
	h cache.Handle
	b Buf
}

// Get retrieves the underlying buffer referenced by the handle.
func (bh bufferHandle) Get() []byte {
	if v := bh.h.Get(); v != nil {
		return v
	} else if bh.b.p != nil {
		return bh.b.p.pool[bh.b.i].b
	}
	return nil
}

// Release releases the buffer, either back to the block cache or BufferPool.
func (bh bufferHandle) Release() {
	bh.h.Release()
	bh.b.Release()
}
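
// exampleBufferHandleFromPool is an illustrative, hypothetical sketch (not
// part of the upstream package): it shows how a bufferHandle can wrap a
// BufferPool allocation instead of a block-cache handle. The cache.Handle
// field is left zero-valued, so Get falls through to the pooled buffer and
// Release simply recycles it.
func exampleBufferHandleFromPool(p *BufferPool, block []byte) {
	// Wrap a pooled buffer; no block-cache handle is involved.
	bh := bufferHandle{b: p.Alloc(len(block))}
	// Get returns the pooled bytes because the cache handle is empty.
	copy(bh.Get(), block)
	// Release is a no-op on the empty cache handle and returns the pooled
	// buffer to the BufferPool for reuse.
	bh.Release()
}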

// A BufferPool holds a pool of buffers for holding sstable blocks. An initial
// size of the pool is provided on Init, but a BufferPool will grow to meet the
// largest working set size. It'll never shrink. When a buffer is released, the
// BufferPool recycles the buffer for future allocations.
//
// A BufferPool should only be used for short-lived allocations with
// well-understood working set sizes to avoid excessive memory consumption.
//
// BufferPool is not thread-safe.
type BufferPool struct {
	// pool contains all the buffers held by the pool, including buffers that
	// are in-use. For every i < len(pool): pool[i].v is non-nil.
	pool []allocedBuffer
}

type allocedBuffer struct {
	v *cache.Value
	// b holds the current byte slice. It's backed by v, but may be a subslice
	// of v's memory while the buffer is in-use [ len(b) ≤ len(v.Buf()) ].
	//
	// If the buffer is not currently in-use, b is nil. When being recycled,
	// BufferPool.Alloc will reset b to be a subslice of v.Buf().
	b []byte
}

// Init initializes the pool with capacity for an initial working set of
// `initialSize` buffers.
func (p *BufferPool) Init(initialSize int) {
	*p = BufferPool{
		pool: make([]allocedBuffer, 0, initialSize),
	}
}
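
// exampleBufferPoolLifecycle is an illustrative, hypothetical sketch (not part
// of the upstream package) of the intended lifecycle: Init sizes the pool for
// the expected number of concurrently in-use buffers, Alloc hands out pooled
// memory, Buf.Release recycles it, and BufferPool.Release frees everything
// once no buffers remain in use.
func exampleBufferPoolLifecycle() {
	var pool BufferPool
	pool.Init(4) // expect roughly four buffers in use at any one time

	buf := pool.Alloc(4096)
	// The pooled bytes are reachable through a bufferHandle, as in Get above.
	data := bufferHandle{b: buf}.Get()
	_ = data
	buf.Release() // marks the slot free so a later Alloc can reuse it

	// Release frees all pooled memory. It panics if any buffer is still in
	// use, so it must come after every Buf has been released.
	pool.Release()
}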

// initPreallocated is like Init but for internal sstable package use in
// instances where a pre-allocated []allocedBuffer slice already exists. It's
// used to avoid an extra allocation initializing BufferPool.pool.
func (p *BufferPool) initPreallocated(pool []allocedBuffer) {
	*p = BufferPool{
		pool: pool[:0],
	}
}

// Release releases all buffers held by the pool and resets the pool to an
// uninitialized state.
func (p *BufferPool) Release() {
	for i := range p.pool {
		if p.pool[i].b != nil {
			panic(errors.AssertionFailedf("Release called on a BufferPool with in-use buffers"))
		}
		cache.Free(p.pool[i].v)
	}
	*p = BufferPool{}
}

// Alloc allocates a new buffer of size n. If the pool already holds an unused
// buffer at least as large as n, the pooled buffer is used instead.
//
// Alloc is O(MAX(N,M)) where N is the largest number of concurrently in-use
// buffers allocated and M is the initialSize passed to Init.
func (p *BufferPool) Alloc(n int) Buf {
	unusableBufferIdx := -1
	for i := 0; i < len(p.pool); i++ {
		if p.pool[i].b == nil {
			if len(p.pool[i].v.Buf()) >= n {
				p.pool[i].b = p.pool[i].v.Buf()[:n]
				return Buf{p: p, i: i}
			}
			unusableBufferIdx = i
		}
	}

	// If we would need to grow the pool to allocate another buffer, but a
	// free slot holds a buffer that's just too small, replace the too-small
	// buffer rather than growing.
	if len(p.pool) == cap(p.pool) && unusableBufferIdx >= 0 {
		i := unusableBufferIdx
		cache.Free(p.pool[i].v)
		p.pool[i].v = cache.Alloc(n)
		p.pool[i].b = p.pool[i].v.Buf()
		return Buf{p: p, i: i}
	}

	// Allocate a new buffer.
	v := cache.Alloc(n)
	p.pool = append(p.pool, allocedBuffer{v: v, b: v.Buf()[:n]})
	return Buf{p: p, i: len(p.pool) - 1}
}
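
// exampleAllocReuse is an illustrative, hypothetical sketch (not part of the
// upstream package) of Alloc's reuse policy: a released buffer is handed out
// again if its underlying allocation is large enough, and a too-small free
// buffer is only replaced when the pool would otherwise have to grow past its
// current capacity.
func exampleAllocReuse() {
	var pool BufferPool
	pool.Init(1)

	small := pool.Alloc(512)
	small.Release() // slot 0 is now free; its allocation holds at least 512 bytes

	// The freed buffer is large enough for this request, so Alloc re-slices
	// the existing cache.Value instead of allocating again.
	reused := pool.Alloc(256)
	reused.Release()

	// Assuming the freed allocation is smaller than 8 KiB, it cannot satisfy
	// this request. The pool is at capacity and slot 0 is free but too small,
	// so Alloc frees the old cache.Value and places a larger one in the same
	// slot rather than growing the pool.
	big := pool.Alloc(8 << 10)
	big.Release()

	pool.Release()
}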

// A Buf holds a reference to a manually-managed, pooled byte buffer.
type Buf struct {
	p *BufferPool
	// i holds the index into p.pool where the buffer may be found. This scheme
	// avoids needing to allocate the handle to the buffer on the heap at the
	// cost of copying two words instead of one.
	i int
}

// Valid returns true if the buf holds a valid buffer.
func (b Buf) Valid() bool {
	return b.p != nil
}

// Release releases the buffer back to the pool.
func (b *Buf) Release() {
	if b.p == nil {
		return
	}
	// Clear the allocedBuffer's byte slice. This signals the allocated buffer
	// is no longer in use and a future call to BufferPool.Alloc may reuse this
	// buffer.
	b.p.pool[b.i].b = nil
	b.p = nil
}
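
// exampleBufZeroValue is an illustrative, hypothetical sketch (not part of the
// upstream package): a zero-value Buf reports Valid() == false, and Release is
// a no-op on it, so callers can unconditionally release a Buf that may or may
// not have been allocated.
func exampleBufZeroValue(p *BufferPool, needScratch bool) {
	var b Buf // zero value: not backed by any pool
	if needScratch {
		b = p.Alloc(1 << 10)
	}
	if b.Valid() {
		// The pooled bytes are reachable through a bufferHandle wrapper.
		scratch := bufferHandle{b: b}.Get()
		_ = scratch
	}
	b.Release() // safe even when b was never allocated
}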