github.com/scottcagno/storage@v1.8.0/pkg/bio/_writer.go (about)

     1  package bio
     2  
     3  import (
     4  	"fmt"
     5  	"io"
     6  	"log"
     7  )
     8  
// Writer buffers fixed size blocks of data and flushes them
// to an underlying io.Writer in fixed size chunks (spans).
// The first error encountered is sticky: once err is set,
// Flush refuses to do further work and returns it.
type Writer struct {
	err error
	buf []byte    // buf is a reserved buffer (one chunk of chunkSize bytes)
	n   int       // n is the current offset in the buffer
	wr  io.Writer // wr is the underlying writer
	bc  int       // bc is the block count (blocks initialized in buf)
}
    16  
    17  // NewWriter returns a new writer whose buffer has
    18  // an underlying size of chunkSize. A Writer writes
    19  // fixed size blocks of data into fixed size chunks,
    20  // also sometimes called spans.
    21  func NewWriter(w io.Writer) *Writer {
    22  	bw := &Writer{
    23  		wr:  w,
    24  		buf: make([]byte, chunkSize, chunkSize),
    25  	}
    26  	bw.initBlocks()
    27  	return bw
    28  }
    29  
    30  func (bw *Writer) initBlocks() {
    31  	for n := 0; n < len(bw.buf); n += blockSize {
    32  		_, err := encodeHeader(bw.buf[n:n+headerSize], nil)
    33  		if err != nil {
    34  			panic(err)
    35  		}
    36  		bw.bc++
    37  	}
    38  }
    39  
    40  func getSliceBounds(p []byte, beg, end int) (int, int) {
    41  	slice := p[beg:end]
    42  	if len(slice) < maxDataPerBlock {
    43  		return beg, len(slice)
    44  	}
    45  	return beg, maxDataPerBlock
    46  }
    47  
    48  func slice(p []byte, beg, end int) []byte {
    49  	if beg < 0 {
    50  		beg = 0
    51  	}
    52  	if end > len(p) {
    53  		end = len(p)
    54  	}
    55  	return p[beg:end]
    56  }
    57  
    58  func (bw *Writer) Write1(p []byte) (int, error) {
    59  	// get the base block count required
    60  	blocks := divUp(len(p), maxDataPerBlock)
    61  	if blocks > blocksPerChunk {
    62  		return -1, ErrInvalidSize
    63  	}
    64  	var prev int
    65  	prev = bw.n
    66  	// write block, or blocks
    67  	for block, off := 1, 0; block <= blocks; block++ {
    68  		fmt.Printf("[BEFORE] bw.n=%d, off=%d\n", bw.n, off)
    69  		// re-calc ending slice point
    70  		data := slice(p, off, off+maxDataPerBlock)
    71  		// write block and update the slice points
    72  		n, err := bw.writeBlockPart(data, block, blocks)
    73  		if err != nil {
    74  			return -1, err
    75  		}
    76  		// update offset
    77  		off += n
    78  		fmt.Printf("[AFTER] bw.n=%d, off=%d\n", bw.n, off)
    79  	}
    80  	fmt.Printf("wrote %d blocks, previous offset=%d, current offset=%d\n", blocks, prev, bw.n)
    81  	// flush block or blocks to disk
    82  	err := bw.Flush()
    83  	if err != nil {
    84  		return -1, err
    85  	}
    86  	// return nil error
    87  	return blocks, nil
    88  }
    89  
    90  func (bw *Writer) writeBlockPart(p []byte, part, parts int) (int, error) {
    91  	log.Println("writing:", string(p))
    92  	// check to make sure data is not too big
    93  	if len(p) > maxDataPerBlock {
    94  		return -1, ErrInvalidSize
    95  	}
    96  	// check to make sure we have room in the
    97  	// current chunk to accommodate another block
    98  	if bw.Available() < blockSize {
    99  		// if not, flush and proceed
   100  		err := bw.Flush()
   101  		if err != nil {
   102  			return -1, err
   103  		}
   104  	}
   105  	// fill out header
   106  	hdr := &header{
   107  		status: statusActive,
   108  		kind:   getKind(part, parts),
   109  		part:   uint8(part),
   110  		parts:  uint8(parts),
   111  		length: uint16(len(p)),
   112  	}
   113  	// store local offset to track how much
   114  	// data we write in this block
   115  	var nn, wrote int
   116  	nn = bw.n
   117  	// encode header
   118  	n, err := encodeHeader(bw.buf[nn:nn+headerSize], hdr)
   119  	if err != nil {
   120  		return -1, err
   121  	}
   122  	nn += n
   123  	// write data
   124  	n = copy(bw.buf[nn:], p)
   125  	nn += n
   126  
   127  	//// get the next offset alignment
   128  	//noff := align(nn, blockMask)
   129  	//// check if we need to pad out the block
   130  	//if nn < noff {
   131  	//	// we do, update local offset
   132  	//	nn += noff - nn
   133  	//}
   134  
   135  	// store the actual data written (minus the header) so
   136  	// we know where to pick up for the next write. we must
   137  	// do this here before we proceed to pad out the block
   138  	wrote = nn - headerSize
   139  
   140  	// check to see if the block needs to be padded
   141  	if diff := nn & blockMask; diff != 0 {
   142  		// move offset to correct place for next write
   143  		nn += blockSize - diff
   144  	}
   145  
   146  	// we should be good to go, lets update the writers
   147  	// global offset now that we know everything is okay
   148  	bw.n += nn
   149  	// and return the ACTUAL data written, and a nil error
   150  	return wrote, nil
   151  }
   152  
// Write2 is an unimplemented placeholder for an alternative write
// strategy. It currently ignores p and always returns (0, nil);
// callers must not rely on it until it is implemented.
func (bw *Writer) Write2(p []byte) (int, error) {
	// implement me...
	return 0, nil
}
   157  
   158  func (bw *Writer) WriteSpan(p []byte) (int, error) {
   159  	// check to make sure data is not too big
   160  	if len(p) > maxDataPerChunk {
   161  		return -1, ErrInvalidSize
   162  	}
   163  	// check to make sure we have room in the
   164  	// current chunk to accommodate another block
   165  	if bw.Available() < align(len(p), blockMask) {
   166  		// if not, flush and proceed
   167  		err := bw.Flush()
   168  		if err != nil {
   169  			return -1, err
   170  		}
   171  	}
   172  	// check to make sure our buffer offset is
   173  	// still aligned to a perfect block offset
   174  	if bw.n&blockMask != 0 {
   175  		return -1, ErrInvalidOffset
   176  	}
   177  	// check if write will fit in single block
   178  	if len(p) <= maxDataPerBlock {
   179  		log.Println("single block writer")
   180  		// if it's good, then write the block
   181  		n, err := bw.writeBlock(p, 1, 1)
   182  		if err != nil {
   183  			return n, err
   184  		}
   185  		err = bw.Flush()
   186  		if err != nil {
   187  			return n, err
   188  		}
   189  		// return data written and nil error
   190  		return n, nil
   191  	}
   192  	// for later maybe?
   193  	var nn int
   194  	// calculate number of blocks
   195  	blks := calcBlocks(len(p) + (len(p)/maxDataPerBlock)*headerSize)
   196  	// otherwise, write a span of blocks
   197  	for part, parts, off := 1, blks, 0; part <= parts; part++ {
   198  		log.Printf("multi-block writer: part=%d, parts=%d, off=%d (n=%d)", part, parts, off, bw.n)
   199  		// calculate offset
   200  		//beg := (part - 1) * maxDataPerBlock
   201  		// calculate end offset
   202  		//end := part * maxDataPerBlock
   203  		//if end > len(p) {
   204  		//	end = len(p)
   205  		//}
   206  		// write block
   207  		n, err := bw.writeBlock(p[off:off+maxDataPerBlock], part, parts)
   208  		if err != nil {
   209  			return 0, err
   210  		}
   211  		nn += n
   212  		off += n - headerSize
   213  	}
   214  	// make sure we flush that data
   215  	err := bw.Flush()
   216  	if err != nil {
   217  		return nn, err
   218  	}
   219  	// return data written and nil error
   220  	return nn, nil
   221  }
   222  
   223  // writeBlock writes data to a block sized chunk. It's parent
   224  // method is responsible for dividing up any data that is larger
   225  // than what can fit in the block and supplying it with correct
   226  // part and parts for the header
   227  func (bw *Writer) writeBlock(p []byte, part, parts int) (int, error) {
   228  	// check to make sure data is not too big
   229  	if len(p) > maxDataPerBlock {
   230  		return -1, ErrInvalidSize
   231  	}
   232  	// check to make sure we have room in the
   233  	// current chunk to accommodate another block
   234  	if bw.Available() < blockSize {
   235  		// if not, flush and proceed
   236  		err := bw.Flush()
   237  		if err != nil {
   238  			return -1, err
   239  		}
   240  	}
   241  	// check to make sure our buffer offset is
   242  	// still aligned to a perfect block offset
   243  	if bw.n&blockMask != 0 {
   244  		return -1, ErrInvalidOffset
   245  	}
   246  	// fill out header
   247  	hdr := &header{
   248  		status: statusActive,
   249  		kind:   getKind(part, parts),
   250  		part:   uint8(part),
   251  		parts:  uint8(parts),
   252  		length: uint16(len(p)),
   253  	}
   254  	// store current offset for later
   255  	var nn int
   256  	nn = bw.n
   257  	// encode the header first
   258  	n, err := encodeHeader(p[0:headerSize], hdr)
   259  	if err != nil {
   260  		return -1, err
   261  	}
   262  	nn += n
   263  	// update buffer offset and write data to buffer
   264  	n = copy(bw.buf[nn:], p[headerSize:])
   265  	nn += n
   266  	bw.n += nn
   267  	// check to see if the block needs to be padded
   268  	if diff := bw.n & blockMask; diff != 0 {
   269  		// move offset to correct place for next write
   270  		bw.n += blockSize - diff
   271  	}
   272  	// return bytes written, and a nil error
   273  	return nn, nil
   274  
   275  }
   276  
   277  // Flush writes any buffered data to the underlying io.Writer
   278  func (bw *Writer) Flush() error {
   279  	if bw.err != nil {
   280  		return bw.err
   281  	}
   282  	if bw.n == 0 {
   283  		return nil
   284  	}
   285  	n, err := bw.wr.Write(bw.buf[0:bw.n])
   286  	if n < bw.n && err == nil {
   287  		err = io.ErrShortWrite
   288  	}
   289  	if err != nil {
   290  		if n > 0 && n < bw.n {
   291  			copy(bw.buf[0:bw.n-n], bw.buf[n:bw.n])
   292  		}
   293  		bw.n -= n
   294  		bw.err = err
   295  		return err
   296  	}
   297  	bw.n = 0
   298  	return nil
   299  }
   300  
   301  // Available returns how many bytes are unused in the buffer
   302  func (bw *Writer) Available() int {
   303  	return len(bw.buf) - bw.n
   304  }
   305  
   306  // Buffered returns the number of bytes that have been written into the buffer
   307  func (bw *Writer) Buffered() int {
   308  	return bw.n
   309  }
   310  
   311  func (bw *Writer) Info() string {
   312  	ss := fmt.Sprintf("writer:\n")
   313  	ss += fmt.Sprintf("n=%d, buffered=%d, available=%d\n", bw.n, bw.Buffered(), bw.Available())
   314  	for i := 0; i < len(bw.buf); i += blockSize {
   315  		ss += fmt.Sprintf("\tblock[%.2d]\n", i/blockSize)
   316  		hdr := new(header)
   317  		decodeHeader(bw.buf[i:i+headerSize], hdr)
   318  		ss += fmt.Sprintf("\t\t%s\n", hdr)
   319  		dat := bw.buf[i+headerSize : i+blockSize]
   320  		//ss += fmt.Sprintf("\t\t%s\n", longStr(string(dat), "", blockSize))
   321  		ss += fmt.Sprintf("\t\t%q\n", dat)
   322  	}
   323  	return ss
   324  }