github.com/2lambda123/git-lfs@v2.5.2+incompatible/tools/iotools.go (about)

     1  package tools
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/sha256"
     6  	"encoding/hex"
     7  	"hash"
     8  	"io"
     9  	"io/ioutil"
    10  	"os"
    11  
    12  	"github.com/git-lfs/git-lfs/errors"
    13  )
    14  
const (
	// memoryBufferLimit is the number of bytes to buffer in memory before
	// spooling the contents of an `io.Reader` in `Spool()` to a temporary
	// file on disk.
	memoryBufferLimit = 1024
)
    21  
    22  // CopyWithCallback copies reader to writer while performing a progress callback
    23  func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb CopyCallback) (int64, error) {
    24  	if success, _ := CloneFile(writer, reader); success {
    25  		if cb != nil {
    26  			cb(totalSize, totalSize, 0)
    27  		}
    28  		return totalSize, nil
    29  	}
    30  	if cb == nil {
    31  		return io.Copy(writer, reader)
    32  	}
    33  
    34  	cbReader := &CallbackReader{
    35  		C:         cb,
    36  		TotalSize: totalSize,
    37  		Reader:    reader,
    38  	}
    39  	return io.Copy(writer, cbReader)
    40  }
    41  
    42  // Get a new Hash instance of the type used to hash LFS content
    43  func NewLfsContentHash() hash.Hash {
    44  	return sha256.New()
    45  }
    46  
// HashingReader wraps a reader and calculates the hash of the data as it is read
type HashingReader struct {
	reader io.Reader // underlying source of the bytes being hashed
	hasher hash.Hash // running digest, updated on every successful Read
}
    52  
    53  func NewHashingReader(r io.Reader) *HashingReader {
    54  	return &HashingReader{r, NewLfsContentHash()}
    55  }
    56  
    57  func NewHashingReaderPreloadHash(r io.Reader, hash hash.Hash) *HashingReader {
    58  	return &HashingReader{r, hash}
    59  }
    60  
    61  func (r *HashingReader) Hash() string {
    62  	return hex.EncodeToString(r.hasher.Sum(nil))
    63  }
    64  
    65  func (r *HashingReader) Read(b []byte) (int, error) {
    66  	w, err := r.reader.Read(b)
    67  	if err == nil || err == io.EOF {
    68  		_, e := r.hasher.Write(b[0:w])
    69  		if e != nil && err == nil {
    70  			return w, e
    71  		}
    72  	}
    73  
    74  	return w, err
    75  }
    76  
// RetriableReader wraps a reader and reports any non-EOF error it returns as
// a retriable error (see errors.NewRetriableError), so callers may retry.
type RetriableReader struct {
	reader io.Reader // underlying reader whose errors are tagged as retriable
}
    81  
    82  func NewRetriableReader(r io.Reader) io.Reader {
    83  	return &RetriableReader{r}
    84  }
    85  
    86  func (r *RetriableReader) Read(b []byte) (int, error) {
    87  	n, err := r.reader.Read(b)
    88  
    89  	// EOF is a successful response as it is used to signal a graceful end
    90  	// of input c.f. https://git.io/v6riQ
    91  	//
    92  	// Otherwise, if the error is non-nil and already retriable (in the
    93  	// case that the underlying reader `r.reader` is itself a
    94  	// `*RetriableReader`, return the error wholesale:
    95  	if err == nil || err == io.EOF || errors.IsRetriableError(err) {
    96  		return n, err
    97  	}
    98  
    99  	return n, errors.NewRetriableError(err)
   100  }
   101  
   102  // Spool spools the contents from 'from' to 'to' by buffering the entire
   103  // contents of 'from' into a temprorary file created in the directory "dir".
   104  // That buffer is held in memory until the file grows to larger than
   105  // 'memoryBufferLimit`, then the remaining contents are spooled to disk.
   106  //
   107  // The temporary file is cleaned up after the copy is complete.
   108  //
   109  // The number of bytes written to "to", as well as any error encountered are
   110  // returned.
   111  func Spool(to io.Writer, from io.Reader, dir string) (n int64, err error) {
   112  	// First, buffer up to `memoryBufferLimit` in memory.
   113  	buf := make([]byte, memoryBufferLimit)
   114  	if bn, err := from.Read(buf); err != nil && err != io.EOF {
   115  		return int64(bn), err
   116  	} else {
   117  		buf = buf[:bn]
   118  	}
   119  
   120  	var spool io.Reader = bytes.NewReader(buf)
   121  	if err != io.EOF {
   122  		// If we weren't at the end of the stream, create a temporary
   123  		// file, and spool the remaining contents there.
   124  		tmp, err := ioutil.TempFile(dir, "")
   125  		if err != nil {
   126  			return 0, errors.Wrap(err, "spool tmp")
   127  		}
   128  		defer os.Remove(tmp.Name())
   129  
   130  		if n, err = io.Copy(tmp, from); err != nil {
   131  			return n, errors.Wrap(err, "unable to spool")
   132  		}
   133  
   134  		if _, err = tmp.Seek(0, io.SeekStart); err != nil {
   135  			return 0, errors.Wrap(err, "unable to seek")
   136  		}
   137  
   138  		// The spooled contents will now be the concatenation of the
   139  		// contents we stored in memory, then the remainder of the
   140  		// contents on disk.
   141  		spool = io.MultiReader(spool, tmp)
   142  	}
   143  
   144  	return io.Copy(to, spool)
   145  }