package operations

import (
	"context"
	"io"
	"sync"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
)

// reOpen is a wrapper for an object reader which reopens the stream on error
//
// It counts the bytes read so that after a retryable read failure the
// source object can be reopened (seeked or ranged) at the point reached.
// All fields below mu are protected by mu.
type reOpen struct {
	ctx         context.Context
	mu          sync.Mutex       // mutex to protect the below
	src         fs.Object        // object to open
	hashOption  *fs.HashesOption // option to pass to initial open
	rangeOption *fs.RangeOption  // option to pass to initial open
	rc          io.ReadCloser    // underlying stream
	read        int64            // number of bytes read from this stream
	maxTries    int              // maximum number of retries
	tries       int              // number of retries we've had so far in this stream
	err         error            // if this is set then Read/Close calls will return it
	opened      bool             // if set then rc is valid and needs closing
}

var (
	errorFileClosed   = errors.New("file already closed")
	errorTooManyTries = errors.New("failed to reopen: too many retries")
)

// newReOpen makes a handle which will reopen itself and seek to where it was on errors
//
// If hashOption is set this will be applied when reading from the start
//
// If rangeOption is set then this will be applied when reading from the
// start, and updated on retries.
39 func newReOpen(ctx context.Context, src fs.Object, hashOption *fs.HashesOption, rangeOption *fs.RangeOption, maxTries int) (rc io.ReadCloser, err error) { 40 h := &reOpen{ 41 ctx: ctx, 42 src: src, 43 hashOption: hashOption, 44 rangeOption: rangeOption, 45 maxTries: maxTries, 46 } 47 h.mu.Lock() 48 defer h.mu.Unlock() 49 err = h.open() 50 if err != nil { 51 return nil, err 52 } 53 return h, nil 54 } 55 56 // open the underlying handle - call with lock held 57 // 58 // we don't retry here as the Open() call will itself have low level retries 59 func (h *reOpen) open() error { 60 var optsArray [2]fs.OpenOption 61 var opts = optsArray[:0] 62 if h.read == 0 { 63 if h.rangeOption != nil { 64 opts = append(opts, h.rangeOption) 65 } 66 if h.hashOption != nil { 67 // put hashOption on if reading from the start, ditch otherwise 68 opts = append(opts, h.hashOption) 69 } 70 } else { 71 if h.rangeOption != nil { 72 // range to the read point 73 opts = append(opts, &fs.RangeOption{Start: h.rangeOption.Start + h.read, End: h.rangeOption.End}) 74 } else { 75 // seek to the read point 76 opts = append(opts, &fs.SeekOption{Offset: h.read}) 77 } 78 } 79 h.tries++ 80 if h.tries > h.maxTries { 81 h.err = errorTooManyTries 82 } else { 83 h.rc, h.err = h.src.Open(h.ctx, opts...) 
84 } 85 if h.err != nil { 86 if h.tries > 1 { 87 fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err) 88 } 89 return h.err 90 } 91 h.opened = true 92 return nil 93 } 94 95 // Read bytes retrying as necessary 96 func (h *reOpen) Read(p []byte) (n int, err error) { 97 h.mu.Lock() 98 defer h.mu.Unlock() 99 if h.err != nil { 100 // return a previous error if there is one 101 return n, h.err 102 } 103 n, err = h.rc.Read(p) 104 if err != nil { 105 h.err = err 106 } 107 h.read += int64(n) 108 if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) { 109 // close underlying stream 110 h.opened = false 111 _ = h.rc.Close() 112 // reopen stream, clearing error if successful 113 fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err) 114 if h.open() == nil { 115 err = nil 116 } 117 } 118 return n, err 119 } 120 121 // Close the stream 122 func (h *reOpen) Close() error { 123 h.mu.Lock() 124 defer h.mu.Unlock() 125 if !h.opened { 126 return errorFileClosed 127 } 128 h.opened = false 129 h.err = errorFileClosed 130 return h.rc.Close() 131 }