github.com/cockroachdb/pebble@v1.1.2/sstable/write_queue.go (about)

     1  package sstable
     2  
     3  import (
     4  	"sync"
     5  
     6  	"github.com/cockroachdb/pebble/internal/base"
     7  )
     8  
// writeTask describes a single compressed data block that is ready (or will
// be ready, once compression completes) to be written to disk by the
// writeQueue, along with the index-block state needed to add its index entry.
type writeTask struct {
	// Since writeTasks are pooled, the compressionDone channel will be re-used.
	// It is necessary that any writes to the channel have already been read,
	// before adding the writeTask back to the pool.
	compressionDone chan bool
	// buf holds the (compressed) data block and scratch space used when
	// writing the block and its index entry.
	buf             *dataBlockBuf
	// If this is not nil, then this index block will be flushed.
	flushableIndexBlock *indexBlockBuf
	// currIndexBlock is the index block on which indexBlock.add must be called.
	currIndexBlock *indexBlockBuf
	// indexEntrySep is the separator key for the index entry of this block.
	indexEntrySep  InternalKey
	// inflightIndexEntrySize is used to decrement Writer.indexBlock.sizeEstimate.inflightSize.
	indexInflightSize int
	// If the index block is finished, then we set the finishedIndexProps here.
	finishedIndexProps []byte
}
    25  
    26  // It is not the responsibility of the writeTask to clear the
    27  // task.flushableIndexBlock, and task.buf.
    28  func (task *writeTask) clear() {
    29  	*task = writeTask{
    30  		indexEntrySep:   base.InvalidInternalKey,
    31  		compressionDone: task.compressionDone,
    32  	}
    33  }
    34  
// Note that only the Writer client goroutine will be adding tasks to the writeQueue.
// Both the Writer client and the compression goroutines will be able to write to
// writeTask.compressionDone to indicate that the compression job associated with
// a writeTask has finished.
type writeQueue struct {
	// tasks is the buffered channel over which the Writer goroutine hands
	// completed-or-pending writeTasks to the single worker goroutine.
	tasks  chan *writeTask
	// wg is used by finish to wait for the worker goroutine to drain tasks.
	wg     sync.WaitGroup
	// writer is the Writer on whose behalf blocks are written to disk.
	writer *Writer

	// err represents an error which is encountered when the write queue attempts
	// to write a block to disk. The error is stored here to skip unnecessary block
	// writes once the first error is encountered.
	err    error
	// closed is set by finish so that repeated finish calls are no-ops.
	closed bool
}
    50  
    51  func newWriteQueue(size int, writer *Writer) *writeQueue {
    52  	w := &writeQueue{}
    53  	w.tasks = make(chan *writeTask, size)
    54  	w.writer = writer
    55  
    56  	w.wg.Add(1)
    57  	go w.runWorker()
    58  	return w
    59  }
    60  
    61  func (w *writeQueue) performWrite(task *writeTask) error {
    62  	var bh BlockHandle
    63  	var bhp BlockHandleWithProperties
    64  
    65  	var err error
    66  	if bh, err = w.writer.writeCompressedBlock(task.buf.compressed, task.buf.tmp[:]); err != nil {
    67  		return err
    68  	}
    69  
    70  	bhp = BlockHandleWithProperties{BlockHandle: bh, Props: task.buf.dataBlockProps}
    71  	if err = w.writer.addIndexEntry(
    72  		task.indexEntrySep, bhp, task.buf.tmp[:], task.flushableIndexBlock, task.currIndexBlock,
    73  		task.indexInflightSize, task.finishedIndexProps); err != nil {
    74  		return err
    75  	}
    76  
    77  	return nil
    78  }
    79  
    80  // It is necessary to ensure that none of the buffers in the writeTask,
    81  // dataBlockBuf, indexBlockBuf, are pointed to by another struct.
    82  func (w *writeQueue) releaseBuffers(task *writeTask) {
    83  	task.buf.clear()
    84  	dataBlockBufPool.Put(task.buf)
    85  
    86  	// This index block is no longer used by the Writer, so we can add it back
    87  	// to the pool.
    88  	if task.flushableIndexBlock != nil {
    89  		task.flushableIndexBlock.clear()
    90  		indexBlockBufPool.Put(task.flushableIndexBlock)
    91  	}
    92  
    93  	task.clear()
    94  	writeTaskPool.Put(task)
    95  }
    96  
    97  func (w *writeQueue) runWorker() {
    98  	for task := range w.tasks {
    99  		<-task.compressionDone
   100  
   101  		if w.err == nil {
   102  			w.err = w.performWrite(task)
   103  		}
   104  
   105  		w.releaseBuffers(task)
   106  	}
   107  	w.wg.Done()
   108  }
   109  
// add enqueues task for the worker goroutine; it blocks only if the tasks
// channel buffer is full. Only the Writer client goroutine may call add.
func (w *writeQueue) add(task *writeTask) {
	w.tasks <- task
}
   113  
   114  // addSync will perform the writeTask synchronously with the caller goroutine. Calls to addSync
   115  // are no longer valid once writeQueue.add has been called at least once.
   116  func (w *writeQueue) addSync(task *writeTask) error {
   117  	// This should instantly return without blocking.
   118  	<-task.compressionDone
   119  
   120  	if w.err == nil {
   121  		w.err = w.performWrite(task)
   122  	}
   123  
   124  	w.releaseBuffers(task)
   125  
   126  	return w.err
   127  }
   128  
   129  // finish should only be called once no more tasks will be added to the writeQueue.
   130  // finish will return any error which was encountered while tasks were processed.
   131  func (w *writeQueue) finish() error {
   132  	if w.closed {
   133  		return w.err
   134  	}
   135  
   136  	close(w.tasks)
   137  	w.wg.Wait()
   138  	w.closed = true
   139  	return w.err
   140  }