storj.io/uplink@v1.13.0/private/storage/streams/buffer/cursor.go

// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package buffer

import (
	"sync"
	"sync/atomic"

	"github.com/zeebo/errs"
)

// Cursor keeps track of how many bytes have been written and the furthest advanced
// reader, letting one wait until space or bytes are available.
type Cursor struct {
	writeAhead int64

	mu   sync.Mutex
	cond sync.Cond

	doneReading uint32
	doneWriting uint32

	readErr  error
	writeErr error

	maxRead int64
	written int64
}

// NewCursor constructs a new cursor that keeps track of reads and writes
// into some buffer, allowing one to wait until enough data has been read or written.
func NewCursor(writeAhead int64) *Cursor {
	c := &Cursor{writeAhead: writeAhead}
	c.cond.L = &c.mu
	return c
}

// WaitRead blocks until the writer is done or until at least n bytes have been written.
// It returns min(n, c.written), telling the caller the largest offset that can be read.
// The ok boolean is true if there are more bytes to be read. If writing is done with an
// error, then 0 and that error are returned. If writing is done with no error and the requested
// amount is at least the amount written, it returns the written amount, false, and nil.
func (c *Cursor) WaitRead(n int64) (m int64, ok bool, err error) {
	if atomic.LoadUint32(&c.doneReading) != 0 {
		return 0, false, errs.New("WaitRead called after DoneReading")
	}
	if written := atomic.LoadInt64(&c.written); n < written {
		return n, true, nil
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if atomic.LoadUint32(&c.doneReading) != 0 {
		return 0, false, errs.New("WaitRead called after DoneReading")
	}

	for {
		doneWriting := atomic.LoadUint32(&c.doneWriting) != 0
		maxRead := atomic.LoadInt64(&c.maxRead)
		written := atomic.LoadInt64(&c.written)

		switch {
		// first, return any write error if there is one.
		case c.writeErr != nil:
			return 0, false, c.writeErr

		// next, report fully read (ok == false), which callers treat as io.EOF.
		case n >= written && doneWriting:
			return written, false, nil

		// next, allow reading up to the written amount.
		case n <= written:
			return n, true, nil

		// next, if maxRead is not yet caught up to written, allow reads to proceed up to written.
		case maxRead < written:
			return written, true, nil

		// finally, if more is requested, allow at most the written amount.
		case doneWriting:
			return written, true, nil
		}

		c.cond.Wait()
	}
}

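// readLoop is a minimal, hypothetical sketch (not part of the upstream API) of how a
// reader might drive WaitRead, ReadTo, and DoneReading. The consume callback stands in
// for whatever copies bytes out of the surrounding buffer; chunk is an assumed step size.
func readLoop(c *Cursor, chunk int64, consume func(lo, hi int64) error) {
	var pos int64
	for {
		m, ok, err := c.WaitRead(pos + chunk)
		if err != nil {
			c.DoneReading(err)
			return
		}
		if m > pos {
			if err := consume(pos, m); err != nil {
				c.DoneReading(err)
				return
			}
			c.ReadTo(m) // unblock a writer waiting for readers to catch up
			pos = m
		}
		if !ok { // the writer is done and everything written has been read
			c.DoneReading(nil)
			return
		}
	}
}
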
// WaitWrite blocks until the readers are done or until the furthest advanced reader is
// within the writeAhead of the writer. It returns the largest offset that can be written.
// The ok boolean is true if there are readers waiting for more bytes. If reading is done
// with an error, then 0 and that error are returned. If reading is done with no error, then
// it returns the amount written, false, and nil.
func (c *Cursor) WaitWrite(n int64) (m int64, ok bool, err error) {
	if atomic.LoadUint32(&c.doneWriting) != 0 {
		return 0, false, errs.New("WaitWrite called after DoneWriting")
	}
	if maxRead := atomic.LoadInt64(&c.maxRead); n <= maxRead+c.writeAhead {
		return n, true, nil
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if atomic.LoadUint32(&c.doneWriting) != 0 {
		return 0, false, errs.New("WaitWrite called after DoneWriting")
	}

	for {
		doneReading := atomic.LoadUint32(&c.doneReading) != 0
		maxRead := atomic.LoadInt64(&c.maxRead)
		written := atomic.LoadInt64(&c.written)

		switch {
		// first, return any read error if there is one.
		case c.readErr != nil:
			return 0, false, c.readErr

		// next, don't allow more writes if the reader is done.
		case doneReading:
			return written, false, nil

		// next, allow when enough behind the furthest advanced reader.
		case n <= maxRead+c.writeAhead:
			return n, true, nil

		// finally, only allow up to a maximum amount ahead of the furthest reader.
		case written < maxRead+c.writeAhead:
			return maxRead + c.writeAhead, true, nil
		}

		c.cond.Wait()
	}
}

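// writeLoop is a minimal, hypothetical sketch (not part of the upstream API) of how a
// writer might drive WaitWrite, WroteTo, and DoneWriting for a stream of total bytes.
// The produce callback stands in for whatever copies bytes into the surrounding buffer;
// chunk is an assumed step size.
func writeLoop(c *Cursor, total, chunk int64, produce func(lo, hi int64) error) {
	var pos int64
	for pos < total {
		want := pos + chunk
		if want > total {
			want = total
		}
		m, ok, err := c.WaitWrite(want)
		if err != nil || !ok { // a read error, or the readers finished early
			c.DoneWriting(err)
			return
		}
		if err := produce(pos, m); err != nil {
			c.DoneWriting(err)
			return
		}
		c.WroteTo(m) // unblock readers waiting for these bytes
		pos = m
	}
	c.DoneWriting(nil)
}
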
// DoneWriting signals that no more Write calls will happen. It returns true
// the first time DoneWriting and DoneReading have both been called.
func (c *Cursor) DoneWriting(err error) bool {
	c.mu.Lock()
	defer c.mu.Unlock()

	if atomic.LoadUint32(&c.doneWriting) == 0 {
		atomic.StoreUint32(&c.doneWriting, 1)
		c.writeErr = err
		c.cond.Broadcast()

		return atomic.LoadUint32(&c.doneReading) != 0
	}

	return false
}

// DoneReading signals that no more Read calls will happen. It returns true
// the first time DoneWriting and DoneReading have both been called.
func (c *Cursor) DoneReading(err error) bool {
	c.mu.Lock()
	defer c.mu.Unlock()

	if atomic.LoadUint32(&c.doneReading) == 0 {
		atomic.StoreUint32(&c.doneReading, 1)
		c.readErr = err
		c.cond.Broadcast()

		return atomic.LoadUint32(&c.doneWriting) != 0
	}

	return false
}

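// finishSide is a hypothetical sketch (not part of the upstream API) showing how each side
// might use the boolean returned above: the writer goroutine calls
// finishSide(c.DoneWriting, err, release) and the reader calls
// finishSide(c.DoneReading, err, release). Exactly one of the two Done calls returns true
// (whichever comes second), so release runs once, only after both sides are finished.
func finishSide(done func(error) bool, err error, release func()) {
	if done(err) {
		release() // this side was the last one out; safe to free the underlying buffer
	}
}
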
// ReadTo reports to the cursor that some reader read up to byte offset n.
func (c *Cursor) ReadTo(n int64) {
	for {
		maxRead := atomic.LoadInt64(&c.maxRead)
		if n <= maxRead {
			return
		}
		if atomic.CompareAndSwapInt64(&c.maxRead, maxRead, n) {
			c.mu.Lock()
			defer c.mu.Unlock()

			c.cond.Broadcast()
			return
		}
	}
}

// WroteTo reports to the cursor that the writer wrote up to byte offset n.
func (c *Cursor) WroteTo(n int64) {
	for {
		written := atomic.LoadInt64(&c.written)
		if n <= written {
			return
		}
		if atomic.CompareAndSwapInt64(&c.written, written, n) {
			c.mu.Lock()
			defer c.mu.Unlock()

			c.cond.Broadcast()
			return
		}
	}
}
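
// pipeSketch is a hypothetical end-to-end sketch (not part of the upstream API) wiring the
// writeLoop and readLoop sketches above through a shared Cursor, the way a pipe-like buffer
// might: the writer stays at most writeAhead bytes in front of the furthest reader, and the
// reader blocks until bytes exist or the writer finishes.
func pipeSketch(total, writeAhead, chunk int64, produce, consume func(lo, hi int64) error) {
	c := NewCursor(writeAhead)

	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); writeLoop(c, total, chunk, produce) }()
	go func() { defer wg.Done(); readLoop(c, chunk, consume) }()
	wg.Wait()
}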