storj.io/uplink@v1.13.0/private/storage/streams/buffer/backend.go (about)

     1  // Copyright (C) 2023 Storj Labs, Inc.
     2  // See LICENSE for copying information.
     3  
     4  package buffer
     5  
     6  import (
     7  	"io"
     8  	"sync"
     9  	"sync/atomic"
    10  )
    11  
// Backend is a backing store of bytes for a buffer: it accepts writes,
// serves reads of previously written data at arbitrary offsets, and
// releases its storage on Close.
type Backend interface {
	io.Writer
	io.ReaderAt
	io.Closer
}
    18  
const (
	// standardMaxEncryptedSegmentSize is the buffer size served by
	// standardPool and special-cased by NewMemoryBackend.
	// NOTE(review): presumably the maximum encrypted segment size
	// (64 MiB plus encryption overhead) — confirm against uplink's
	// segment-size configuration.
	standardMaxEncryptedSegmentSize = 67254016

	// chunkSize is the fixed allocation unit (1 MiB) used by ChunkBackend.
	chunkSize = 1024 * 1024
)
    24  
var (
	// standardPool recycles the large segment-sized buffers used by
	// MemoryBackend so repeated uploads avoid re-allocating ~64 MiB.
	standardPool = sync.Pool{
		New: func() interface{} {
			// TODO: this pool approach is a bit of a bandaid - it would be good to
			// rework this logic to not require this large allocation at all.
			return new([standardMaxEncryptedSegmentSize]byte)
		},
	}

	// chunkPool recycles the 1 MiB chunks used by ChunkBackend.
	chunkPool = sync.Pool{
		New: func() interface{} {
			return new([chunkSize]byte)
		},
	}
)
    40  
    41  // NewMemoryBackend returns a MemoryBackend with the provided initial
    42  // capacity. It implements the Backend interface.
    43  func NewMemoryBackend(cap int64) (rv *MemoryBackend) {
    44  	rv = &MemoryBackend{}
    45  	if cap == standardMaxEncryptedSegmentSize {
    46  		rv.buf = standardPool.Get().(*[standardMaxEncryptedSegmentSize]byte)[:]
    47  	} else {
    48  		rv.buf = make([]byte, cap)
    49  	}
    50  	return rv
    51  }
    52  
// MemoryBackend implements the Backend interface backed by a slice.
type MemoryBackend struct {
	len    int64  // number of readable bytes; accessed atomically by Write/ReadAt
	buf    []byte // backing storage; may come from standardPool (see Close)
	closed bool   // set by Close; NOTE(review): not synchronized — assumes Close is not concurrent with Write/ReadAt, confirm
}
    59  
    60  // Write appends the data to the buffer.
    61  func (u *MemoryBackend) Write(p []byte) (n int, err error) {
    62  	if u.closed {
    63  		return 0, io.ErrClosedPipe
    64  	}
    65  	l := atomic.LoadInt64(&u.len)
    66  	n = copy(u.buf[l:], p)
    67  	if n != len(p) {
    68  		return n, io.ErrShortWrite
    69  	}
    70  	atomic.AddInt64(&u.len, int64(n))
    71  	return n, nil
    72  }
    73  
    74  // ReadAt reads into the provided buffer p starting at off.
    75  func (u *MemoryBackend) ReadAt(p []byte, off int64) (n int, err error) {
    76  	if u.closed {
    77  		return 0, io.ErrClosedPipe
    78  	}
    79  	l := atomic.LoadInt64(&u.len)
    80  	if off < 0 || off >= l {
    81  		return 0, io.EOF
    82  	}
    83  	return copy(p, u.buf[off:l]), nil
    84  }
    85  
    86  // Close releases memory and causes future calls to ReadAt and Write to fail.
    87  func (u *MemoryBackend) Close() error {
    88  	buf := u.buf
    89  	u.buf = nil
    90  	u.closed = true
    91  	if len(buf) == standardMaxEncryptedSegmentSize {
    92  		standardPool.Put((*[standardMaxEncryptedSegmentSize]byte)(buf))
    93  	}
    94  	return nil
    95  }
    96  
    97  // NewChunkBackend returns a ChunkBackend with the provided initial capacity.
    98  // Internally it stitchers writes together into small chunks to reduce the size
    99  // of allocations needed for small objects. It implements the Backend interface.
   100  // TODO: evaluate the usefulness of `cap` for the chunk backend.
   101  func NewChunkBackend(cap int64) (rv *ChunkBackend) {
   102  	// TODO: evaluate whether the chunks slice is worth trying to pool. Benchmarks
   103  	// currently show the ChunkBackend has one extra (tiny) allocation but is otherwise
   104  	// barely distinguishable to the MemoryBackend in terms of read/write performance.
   105  	chunks := make([]atomic.Pointer[[chunkSize]byte], chunksNeeded(cap))
   106  	return &ChunkBackend{chunks: chunks, cap: cap}
   107  }
   108  
// ChunkBackend implements the Backend interface backed by a chained series of memory-pooled slices.
type ChunkBackend struct {
	end    atomic.Int64 // number of bytes written; readers load this before touching chunks
	cap    int64        // maximum number of bytes the backend accepts (see Write)
	chunks []atomic.Pointer[[chunkSize]byte] // lazily populated, in order, by Write
	closed bool         // set by Close; NOTE(review): not synchronized — assumes Close is not concurrent with Write/ReadAt, confirm
}
   116  
// Write appends the data to the buffer.
//
// Writes past cap are truncated and reported with io.ErrShortWrite; the
// bytes that fit are still written and counted in the returned n. Chunks
// are taken from chunkPool on first use and published with an atomic
// Store *before* end is advanced, so a concurrent ReadAt that observes
// the new end is guaranteed to observe the chunk pointers it needs.
// NOTE(review): assumes a single concurrent writer — confirm.
func (u *ChunkBackend) Write(p []byte) (n int, err error) {
	if u.closed {
		return 0, io.ErrClosedPipe
	}

	end := u.end.Load()
	// If writing p exceeds the cap then constrain p so the write
	// no longer exceeds the cap and return ErrShortWrite.
	if end+int64(len(p)) > u.cap {
		p = p[:u.cap-end]
		err = io.ErrShortWrite
	}

	// Calculate the starting chunk position relative to the end
	chunkIdx, chunkOff := chunkPosition(end)

	for len(p) > 0 {
		chunk := u.chunks[chunkIdx].Load()
		if chunk == nil {
			// Lazily allocate the chunk; pooled chunks may hold stale
			// bytes, but reads are capped at end so they are never seen.
			chunk = chunkPool.Get().(*[chunkSize]byte)
			u.chunks[chunkIdx].Store(chunk)
		}
		nchunk := copy(chunk[chunkOff:], p)
		p = p[nchunk:]
		n += nchunk

		// Subsequent chunks are written from their beginning.
		chunkIdx++
		chunkOff = 0
	}

	if n > 0 {
		u.end.Add(int64(n))
	}
	return n, err
}
   153  
   154  // ReadAt reads into the provided buffer p starting at off.
   155  func (u *ChunkBackend) ReadAt(p []byte, off int64) (n int, err error) {
   156  	if u.closed {
   157  		return 0, io.ErrClosedPipe
   158  	}
   159  
   160  	end := u.end.Load()
   161  	if off < 0 || off >= end {
   162  		return 0, io.EOF
   163  	}
   164  
   165  	// If the read goes past the end, cap p to prevent read overflow.
   166  	if off+int64(len(p)) > end {
   167  		p = p[:end-off]
   168  	}
   169  
   170  	// Calculate the starting chunk position relative to the read offset
   171  	chunkIdx, chunkOff := chunkPosition(off)
   172  
   173  	for len(p) > 0 {
   174  		chunk := u.chunks[chunkIdx].Load()
   175  		nchunk := copy(p, chunk[chunkOff:])
   176  		p = p[nchunk:]
   177  		n += nchunk
   178  
   179  		chunkIdx++
   180  		chunkOff = 0
   181  	}
   182  	return n, nil
   183  }
   184  
   185  // Close releases memory and causes future calls to ReadAt and Write to fail.
   186  func (u *ChunkBackend) Close() error {
   187  	chunks := u.chunks
   188  	u.chunks = nil
   189  	u.closed = true
   190  	for i := 0; i < len(chunks); i++ {
   191  		chunk := chunks[i].Load()
   192  		if chunk == nil {
   193  			break
   194  		}
   195  		chunkPool.Put(chunk)
   196  	}
   197  	return nil
   198  }
   199  
   200  func chunksNeeded(n int64) int64 {
   201  	if n == 0 {
   202  		return 0
   203  	}
   204  	return 1 + ((n - 1) / chunkSize)
   205  }
   206  
   207  func chunkPosition(pos int64) (index, offset int64) {
   208  	index = pos / chunkSize
   209  	offset = pos - (index * chunkSize)
   210  	return index, offset
   211  }