gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/downloaddestination.go

package renter

// Downloads can be written directly to a file, to an http stream, or to an
// in-memory buffer. The core download loop only has the concept of calling
// WritePieces on a destination, and then calling Close when the download is
// complete.
//
// To support files, streams, and in-memory buffers, the downloadDestination
// interface exists. It is used to map things like an os.File or an io.Writer
// to a downloadDestination. This interface is implemented by:
// + downloadDestinationBuffer (an in-memory buffer of pieces)
// + downloadDestinationFile (wraps an os.File)
// + downloadDestinationWriter (wraps an io.Writer)
//
// There is also a helper function, newDownloadDestinationWriter, to convert an
// io.Writer into a downloadDestinationWriter, so that a sequential stream can
// be used as a download destination as well.

import (
	"errors"
	"io"
	"os"
	"sync"
	"time"

	"gitlab.com/NebulousLabs/fastrand"

	"gitlab.com/SiaPrime/SiaPrime/modules"
)

// skipWriter is a helper type that ignores the first 'skip' bytes written to it.
type skipWriter struct {
	w    io.Writer
	skip int
}

// Write will write bytes to the skipWriter, being sure to skip over any bytes
// which the skipWriter was initialized to skip.
func (sw *skipWriter) Write(p []byte) (int, error) {
	if sw.skip == 0 {
		return sw.w.Write(p)
	} else if sw.skip > len(p) {
		sw.skip -= len(p)
		return len(p), nil
	}
	n, err := sw.w.Write(p[sw.skip:])
	n += sw.skip
	sw.skip = 0
	return n, err
}

// SectionWriter implements Write on a section of an underlying WriterAt.
type SectionWriter struct {
	w     io.WriterAt
	base  int64
	off   int64
	limit int64
}

// errSectionWriteOutOfBounds is an error returned by the section writer if a
// write would cross the boundaries of the section.
var errSectionWriteOutOfBounds = errors.New("section write is out of bounds")

// NewSectionWriter returns a SectionWriter that writes to w starting at offset
// off and returns errSectionWriteOutOfBounds once n bytes have been written.
func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
	return &SectionWriter{w, off, off, off + n}
}

// Write implements the io.Writer interface using WriteAt.
func (s *SectionWriter) Write(p []byte) (n int, err error) {
	if s.off >= s.limit {
		return 0, errSectionWriteOutOfBounds
	}
	if int64(len(p)) > s.limit-s.off {
		return 0, errSectionWriteOutOfBounds
	}
	n, err = s.w.WriteAt(p, s.off)
	s.off += int64(n)
	return
}
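// Illustrative sketch (not part of the original implementation): the
// skipWriter drops the first 'skip' bytes it receives, even when those bytes
// arrive across several Write calls, and passes everything after that through
// to the underlying writer. The helper name and arguments here are
// hypothetical.
func exampleSkipWriter(w io.Writer) error {
	sw := &skipWriter{w: w, skip: 4}
	// The first 4 bytes are discarded; Write still reports them as written.
	if _, err := sw.Write([]byte("head")); err != nil {
		return err
	}
	// All subsequent bytes reach the underlying writer unchanged.
	_, err := sw.Write([]byte("data"))
	return err
}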
// downloadDestination is the interface that receives the data recovered by the
// download process. The call to WritePieces is in `threadedRecoverLogicalData`.
//
// The downloadDestination interface takes a set of pieces because different
// types of destinations prefer receiving pieces over receiving continuous
// data. The destinations that prefer continuous data can have the WritePieces
// method of the interface convert the pieces into continuous data.
type downloadDestination interface {
	// WritePieces takes the set of pieces from the chunk as input. There should
	// be at least `minPieces` pieces, but they do not need to be the data
	// pieces - the downloadDestination can check and determine if a recovery is
	// required.
	//
	// The pieces are provided decrypted. If we did not need to decrypt the
	// data, there would be little point in fetching the data.
	WritePieces(ec modules.ErasureCoder, pieces [][]byte, dataOffset uint64, writeOffset int64, length uint64) error
}

// downloadDestinationBuffer writes logical chunk data to an in-memory buffer.
// This buffer is primarily used when performing repairs on uploads.
type downloadDestinationBuffer struct {
	pieces [][]byte
}

// NewDownloadDestinationBuffer creates a new downloadDestinationBuffer and
// returns it.
func NewDownloadDestinationBuffer() *downloadDestinationBuffer {
	return &downloadDestinationBuffer{}
}

// WritePieces stores the provided pieces for later processing.
func (dw *downloadDestinationBuffer) WritePieces(_ modules.ErasureCoder, pieces [][]byte, _ uint64, _ int64, _ uint64) error {
	dw.pieces = pieces
	return nil
}

// downloadDestinationFile wraps an os.File into a downloadDestination.
type downloadDestinationFile struct {
	deps            modules.Dependencies
	f               *os.File
	staticChunkSize int64
}

// Close implements the io.Closer interface for downloadDestinationFile.
func (ddf *downloadDestinationFile) Close() error {
	return ddf.f.Close()
}

// WritePieces will decode the pieces and write them to the file at the
// provided offset, using the provided length.
func (ddf *downloadDestinationFile) WritePieces(ec modules.ErasureCoder, pieces [][]byte, dataOffset uint64, offset int64, length uint64) error {
	sw := NewSectionWriter(ddf.f, offset, ddf.staticChunkSize)
	if ddf.deps.Disrupt("PostponeWritePiecesRecovery") {
		time.Sleep(time.Duration(fastrand.Intn(1000)) * time.Millisecond)
	}
	return ec.Recover(pieces, dataOffset+length, &skipWriter{w: sw, skip: int(dataOffset)})
}
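// Illustrative sketch (not part of the original implementation) of the offset
// handling in downloadDestinationFile.WritePieces above: the SectionWriter
// confines writes to this chunk's region of the file, while the skipWriter
// discards the first 'dataOffset' bytes of the recovered stream so that only
// the requested byte range lands on disk. The function name and parameters
// are hypothetical.
func exampleChunkRegionWriter(f *os.File, writeOffset, chunkSize int64, dataOffset uint64) io.Writer {
	// Writes may only land in [writeOffset, writeOffset+chunkSize).
	section := NewSectionWriter(f, writeOffset, chunkSize)
	// Recovery starts from the beginning of the chunk, so the first
	// 'dataOffset' recovered bytes are dropped before reaching the file.
	return &skipWriter{w: section, skip: int(dataOffset)}
}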
// downloadDestinationWriter is a downloadDestination that writes to an
// underlying data stream. The data stream is expecting sequential data while
// the download chunks will be written in an arbitrary order using calls to
// WritePieces. We need to block the calls to WritePieces until all prior data
// has been written.
//
// NOTE: If the caller accidentally leaves a gap between calls to WritePieces,
// for example writes bytes 0-100 and then writes bytes 110-200, and
// accidentally never writes bytes 100-110, the downloadDestinationWriter will
// block forever waiting for those gap bytes to be written.
//
// NOTE: Calling WritePieces has linear time performance in the number of
// concurrent calls to WritePieces.
type downloadDestinationWriter struct {
	closed   bool
	mu       sync.Mutex // Protects the underlying data structures.
	progress int64      // How much data has been written so far.
	io.Writer           // The underlying writer.

	// A list of write calls and their corresponding locks. When one write call
	// completes, it'll search through the list of write calls for the next one.
	// The next write call can be unblocked by unlocking the corresponding mutex
	// in the next array.
	blockingWriteCalls   []int64 // A list of write calls that are waiting for their turn.
	blockingWriteSignals []*sync.Mutex
}

var (
	// errClosedStream gets returned if the stream was closed but we are trying
	// to write.
	errClosedStream = errors.New("unable to write because stream has been closed")

	// errOffsetAlreadyWritten gets returned if a call to WritePieces tries to
	// write to a place in the stream which has already had data written to it.
	errOffsetAlreadyWritten = errors.New("cannot write to that offset in stream, data already written")
)

// newDownloadDestinationWriter takes an io.Writer and converts it into a
// downloadDestination.
func newDownloadDestinationWriter(w io.Writer) *downloadDestinationWriter {
	return &downloadDestinationWriter{Writer: w}
}

// unblockNextWrites will iterate over all of the blocking write calls and
// unblock any whose offsets have been reached by the current progress of the
// stream.
//
// NOTE: unblockNextWrites has linear time performance in the number of
// currently blocking calls.
func (ddw *downloadDestinationWriter) unblockNextWrites() {
	for i, offset := range ddw.blockingWriteCalls {
		if offset <= ddw.progress {
			ddw.blockingWriteSignals[i].Unlock()
			ddw.blockingWriteCalls = append(ddw.blockingWriteCalls[0:i], ddw.blockingWriteCalls[i+1:]...)
			ddw.blockingWriteSignals = append(ddw.blockingWriteSignals[0:i], ddw.blockingWriteSignals[i+1:]...)
		}
	}
}

// Close will unblock any hanging calls to WritePieces and mark the stream as
// closed; subsequent writes will return errClosedStream.
func (ddw *downloadDestinationWriter) Close() error {
	ddw.mu.Lock()
	if ddw.closed {
		ddw.mu.Unlock()
		return errClosedStream
	}
	ddw.closed = true
	for i := range ddw.blockingWriteSignals {
		ddw.blockingWriteSignals[i].Unlock()
	}
	ddw.mu.Unlock()
	return nil
}
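// Illustrative sketch (not part of the original implementation) of the
// double-lock pattern that WritePieces below uses to park out-of-order
// writers: a goroutine locks a fresh mutex, publishes it where another
// goroutine can find it, and then blocks on a second Lock until that other
// goroutine calls Unlock. All names in this sketch are hypothetical.
func exampleParkAndRelease() {
	var mu sync.Mutex         // guards the list of parked waiters
	var signals []*sync.Mutex // analogous to blockingWriteSignals

	// Releaser: wakes every parked waiter, the way unblockNextWrites does for
	// waiters whose offsets have been reached.
	release := func() {
		mu.Lock()
		for _, sig := range signals {
			sig.Unlock()
		}
		signals = nil
		mu.Unlock()
	}

	// Waiter: lock myMu, publish it, then block on the second Lock.
	myMu := new(sync.Mutex)
	myMu.Lock()
	mu.Lock()
	signals = append(signals, myMu)
	mu.Unlock()

	go release()
	myMu.Lock() // blocks until release() unlocks myMu
}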
// WritePieces will block until the stream has progressed to 'offset', and then
// decode the pieces and write them. An error will be returned if the stream has
// already progressed beyond 'offset'.
func (ddw *downloadDestinationWriter) WritePieces(ec modules.ErasureCoder, pieces [][]byte, dataOffset uint64, offset int64, length uint64) error {
	write := func() error {
		// Error if the stream has been closed.
		if ddw.closed {
			return errClosedStream
		}
		// Error if the stream has progressed beyond 'offset'.
		if offset < ddw.progress {
			ddw.mu.Unlock()
			return errOffsetAlreadyWritten
		}

		// Write the data to the stream, then update the progress and unblock
		// the next write.
		err := ec.Recover(pieces, dataOffset+length, &skipWriter{w: ddw, skip: int(dataOffset)})
		ddw.progress += int64(length)
		ddw.unblockNextWrites()
		return err
	}

	ddw.mu.Lock()
	// Attempt to write if the stream progress is at or beyond the offset. The
	// write call will perform error handling.
	if offset <= ddw.progress {
		err := write()
		ddw.mu.Unlock()
		return err
	}

	// The stream has not yet progressed to 'offset'. We will block until the
	// stream has made progress. We perform the block by creating a
	// thread-specific mutex 'myMu' and adding it to the object's list of
	// blocking threads. When other threads successfully call WritePieces, they
	// will reference this list and unblock any which have enough progress. The
	// result is a somewhat strange construction where we lock myMu twice in a
	// row, but between those two calls to lock, we put myMu in a place where
	// another thread can unlock myMu.
	//
	// myMu will be unblocked when another thread calls 'unblockNextWrites'.
	myMu := new(sync.Mutex)
	myMu.Lock()
	ddw.blockingWriteCalls = append(ddw.blockingWriteCalls, offset)
	ddw.blockingWriteSignals = append(ddw.blockingWriteSignals, myMu)
	ddw.mu.Unlock()
	myMu.Lock()
	ddw.mu.Lock()
	err := write()
	ddw.mu.Unlock()
	return err
}
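// Illustrative usage sketch (not part of the original implementation): a
// streaming download hands the same destination to several chunk fetchers
// that may finish in arbitrary order; WritePieces serializes them so the
// underlying io.Writer only ever sees sequential data. The erasure coder,
// piece data, and chunk size are hypothetical placeholders supplied by the
// caller.
func exampleStreamingDownload(w io.Writer, ec modules.ErasureCoder, chunks [][][]byte, chunkSize uint64) error {
	dest := newDownloadDestinationWriter(w)
	errs := make([]error, len(chunks))
	var wg sync.WaitGroup
	for i := range chunks {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each chunk writes its full data at its own offset. A chunk that
			// finishes early blocks until all earlier chunks have been written.
			errs[i] = dest.WritePieces(ec, chunks[i], 0, int64(uint64(i)*chunkSize), chunkSize)
		}(i)
	}
	wg.Wait()
	if err := dest.Close(); err != nil {
		return err
	}
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}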