package operations

import (
	"context"
	"io"
	"sync"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// reOpen is a wrapper for an object reader which reopens the stream on error
//
// All fields are protected by mu. A reOpen is created by newReOpen and
// behaves as an io.ReadCloser.
type reOpen struct {
	ctx         context.Context
	mu          sync.Mutex       // mutex to protect the below
	src         fs.Object        // object to open
	hashOption  *fs.HashesOption // option to pass to initial open
	rangeOption *fs.RangeOption  // option to pass to initial open
	rc          io.ReadCloser    // underlying stream
	read        int64            // number of bytes read from this stream
	maxTries    int              // maximum number of retries
	tries       int              // number of retries we've had so far in this stream
	err         error            // if this is set then Read/Close calls will return it
	opened      bool             // if set then rc is valid and needs closing
}

// Sentinel errors returned from Read/Close once the handle is dead.
var (
	errorFileClosed   = errors.New("file already closed")
	errorTooManyTries = errors.New("failed to reopen: too many retries")
)

// newReOpen makes a handle which will reopen itself and seek to where it was on errors
//
// If hashOption is set this will be applied when reading from the start
//
// If rangeOption is set then this will applied when reading from the
// start, and updated on retries.
38 func newReOpen(ctx context.Context, src fs.Object, hashOption *fs.HashesOption, rangeOption *fs.RangeOption, maxTries int) (rc io.ReadCloser, err error) { 39 h := &reOpen{ 40 ctx: ctx, 41 src: src, 42 hashOption: hashOption, 43 rangeOption: rangeOption, 44 maxTries: maxTries, 45 } 46 h.mu.Lock() 47 defer h.mu.Unlock() 48 err = h.open() 49 if err != nil { 50 return nil, err 51 } 52 return h, nil 53 } 54 55 // open the underlying handle - call with lock held 56 // 57 // we don't retry here as the Open() call will itself have low level retries 58 func (h *reOpen) open() error { 59 var optsArray [2]fs.OpenOption 60 var opts = optsArray[:0] 61 if h.read == 0 { 62 if h.rangeOption != nil { 63 opts = append(opts, h.rangeOption) 64 } 65 if h.hashOption != nil { 66 // put hashOption on if reading from the start, ditch otherwise 67 opts = append(opts, h.hashOption) 68 } 69 } else { 70 if h.rangeOption != nil { 71 // range to the read point 72 opts = append(opts, &fs.RangeOption{Start: h.rangeOption.Start + h.read, End: h.rangeOption.End}) 73 } else { 74 // seek to the read point 75 opts = append(opts, &fs.SeekOption{Offset: h.read}) 76 } 77 } 78 h.tries++ 79 if h.tries > h.maxTries { 80 h.err = errorTooManyTries 81 } else { 82 h.rc, h.err = h.src.Open(h.ctx, opts...) 
83 } 84 if h.err != nil { 85 if h.tries > 1 { 86 fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err) 87 } 88 return h.err 89 } 90 h.opened = true 91 return nil 92 } 93 94 // Read bytes retrying as necessary 95 func (h *reOpen) Read(p []byte) (n int, err error) { 96 h.mu.Lock() 97 defer h.mu.Unlock() 98 if h.err != nil { 99 // return a previous error if there is one 100 return n, h.err 101 } 102 n, err = h.rc.Read(p) 103 if err != nil { 104 h.err = err 105 } 106 h.read += int64(n) 107 if err != nil && err != io.EOF { 108 // close underlying stream 109 h.opened = false 110 _ = h.rc.Close() 111 // reopen stream, clearing error if successful 112 fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err) 113 if h.open() == nil { 114 err = nil 115 } 116 } 117 return n, err 118 } 119 120 // Close the stream 121 func (h *reOpen) Close() error { 122 h.mu.Lock() 123 defer h.mu.Unlock() 124 if !h.opened { 125 return errorFileClosed 126 } 127 h.opened = false 128 h.err = errorFileClosed 129 return h.rc.Close() 130 }