github.com/scottcagno/storage@v1.8.0/pkg/bio/writer.go (about)

     1  package bio
     2  
import (
	"bufio"
	"bytes"
	"errors"
	"io"
)
     8  
// pad is a zero-filled scratch block used by writeBlock to pad
// short block bodies out to a full blockSize on the wire.
var pad [blockSize]byte
    10  
// Writer is a bio writer that implements the
// io.Writer and io.WriterAt interfaces
type Writer struct {
	bw *bufio.Writer // bw is the buffered underlying writer
}
    16  
    17  // NewWriter returns a new *Writer whose buffer has
    18  // an underlying size of chunkSize. A Writer writes
    19  // fixed size blocks of data into fixed size chunks,
    20  // also sometimes called spans.
    21  func NewWriter(w io.Writer) *Writer {
    22  	// if we get a bytes buffer as a writer
    23  	// make sure we grow it, otherwise bad
    24  	// things will happen
    25  	if b, ok := w.(*bytes.Buffer); ok {
    26  		if chunkSize > b.Cap() {
    27  			b.Grow(chunkSize)
    28  		}
    29  	}
    30  	// create and return a new *Writer
    31  	return &Writer{
    32  		bw: bufio.NewWriterSize(w, chunkSize),
    33  	}
    34  }
    35  
    36  // Write writes the contents of p into the buffer. Write
    37  // returns an error if len(p) > maxDataPerChunk.
    38  func (w *Writer) Write(p []byte) (int, error) {
    39  	// perform error checking
    40  	if p == nil {
    41  		return -1, ErrDataIsEmpty
    42  	}
    43  	if len(p) > maxDataPerChunk {
    44  		return -1, ErrSliceTooLarge
    45  	}
    46  	// init error var for later
    47  	var err error
    48  	// get block count for writing
    49  	part, parts := 1, divUp(len(p), maxDataPerBlock)
    50  	// start writing blocks sequentially
    51  	for i := 0; i < len(p); i += maxDataPerBlock {
    52  		// setup j to be the slice ending boundary
    53  		j := i + maxDataPerBlock
    54  		// necessary check to avoid slicing beyond p's capacity
    55  		if j > len(p) {
    56  			j = len(p)
    57  		}
    58  		// write block (a slice of p, from i to j)
    59  		_, err = w.writeBlock(p[i:j], part, parts)
    60  		if err != nil {
    61  			return -1, err
    62  		}
    63  		// increment parts (if need be)
    64  		part++
    65  	}
    66  	// done writing, flush
    67  	err = w.bw.Flush()
    68  	if err != nil {
    69  		return -1, err
    70  	}
    71  	// return
    72  	return parts * blockSize, nil
    73  }
    74  
    75  // WriteAt writes len(p) bytes from p to the underlying data stream
    76  // at offset off. It returns the number of bytes written from
    77  // p (0 <= n <= len(p)) and any error encountered that caused the
    78  // write to stop early. WriteAt must return a non-nil error if it
    79  // returns n < len(p). If WriteAt is writing to a destination with
    80  // a seek offset, WriteAt should not affect nor be affected by the
    81  // underlying seek offset.
    82  func (w *Writer) WriteAt(p []byte, off int64) (int, error) {
    83  
    84  	return -1, nil
    85  }
    86  
    87  func (w *Writer) writeBlock(p []byte, part, parts int) (int, error) {
    88  	// create header
    89  	h := &header{
    90  		status: statusActive,
    91  		kind:   getKind(part, parts),
    92  		part:   uint8(part),
    93  		parts:  uint8(parts),
    94  		length: uint16(len(p)),
    95  	}
    96  	// write header
    97  	_, err := h.WriteTo(w.bw)
    98  	if err != nil {
    99  		return -1, err
   100  	}
   101  	// write body
   102  	n, err := w.bw.Write(p)
   103  	if err != nil {
   104  		return -1, err
   105  	}
   106  	// check to see if we need to pad
   107  	if n < maxDataPerBlock {
   108  		padding := maxDataPerBlock - n
   109  		_, err = w.bw.Write(pad[:padding])
   110  		if err != nil {
   111  			return -1, err
   112  		}
   113  	}
   114  	// return exactly how much data was written into this block
   115  	return n, nil
   116  }