storj.io/uplink@v1.13.0/private/eestream/bufpool.go (about)

     1  // Copyright (C) 2023 Storj Labs, Inc.
     2  // See LICENSE for copying information.
     3  
     4  package eestream
     5  
     6  import (
     7  	"sync"
     8  	"sync/atomic"
     9  
    10  	"storj.io/common/sync2/race2"
    11  )
    12  
// globalBufSize is the fixed size of every pooled buffer: 32 KiB.
const globalBufSize = 32 * 1024

// globalPool recycles fixed-size 32 KiB byte arrays shared by all
// BatchPool instances; pooling by pointer avoids an allocation per Get/Put.
var globalPool = sync.Pool{New: func() any { return new([globalBufSize]byte) }}
    16  
// A BatchPool is a sync.Pool that deals with batches of erasure shares,
// serialized as []byte slices of a fixed size. The fixed size is the largest
// multiple of the erasure share size that fits in globalBufSize.
type BatchPool struct {
	// bufSize is the usable prefix of each pooled buffer:
	// (globalBufSize / shareSize) * shareSize.
	bufSize int
}
    23  
    24  // NewBatchPool creates a BatchPool with the given erasure share size.
    25  func NewBatchPool(shareSize int) *BatchPool {
    26  	return &BatchPool{
    27  		bufSize: (globalBufSize / shareSize) * shareSize,
    28  	}
    29  }
    30  
    31  // GetAndClaim returns a batch of the pool. To free the batch, a Dec() call is needed.
    32  func (b *BatchPool) GetAndClaim() *Batch {
    33  	batch := &Batch{
    34  		slice:   globalPool.Get().(*[globalBufSize]byte),
    35  		bufSize: b.bufSize,
    36  	}
    37  	batch.refCount.Store(1)
    38  	return batch
    39  }
    40  
    41  // Size returns the buffer size used in this pool.
    42  func (b *BatchPool) Size() int { return b.bufSize }
    43  
// A Batch is a reference counted slice of erasure shares. Batches are returned
// by BatchPool.GetAndClaim with a starting reference count of 1.
type Batch struct {
	// slice is the pooled backing array; returned to globalPool on final Release.
	slice *[globalBufSize]byte
	// bufSize is the usable length of slice (a multiple of the share size).
	bufSize int
	// refCount tracks outstanding claims; once it reaches zero the batch
	// is recycled and can no longer be claimed.
	refCount atomic.Int32
}
    51  
    52  // Slice returns the batch's underlying memory allocation.
    53  func (b *Batch) Slice() []byte { return b.slice[:b.bufSize] }
    54  
    55  // Claim adds 1 to the batch reference count and returns true if the batch
    56  // was claimable. See Release.
    57  func (b *Batch) Claim() bool {
    58  	for {
    59  		val := b.refCount.Load()
    60  		if val <= 0 {
    61  			return false
    62  		}
    63  		if b.refCount.CompareAndSwap(val, val+1) {
    64  			return true
    65  		}
    66  	}
    67  }
    68  
    69  // Release subtracts 1 from the batch reference count, returning the batch to
    70  // the pool when it hits zero. Future Claim calls will return false once
    71  // the counter drops to zero.
    72  func (b *Batch) Release() {
    73  	res := b.refCount.Add(-1)
    74  	if res <= 0 {
    75  		if res < 0 {
    76  			panic("extra release")
    77  		}
    78  		race2.WriteSlice(b.slice[:])
    79  		globalPool.Put(b.slice)
    80  	}
    81  }