github.com/NebulousLabs/Sia@v1.3.7/modules/renter/files.go

package renter

import (
	"errors"
	"fmt"
	"math"
	"os"
	"path/filepath"
	"sync"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/persist"
	"github.com/NebulousLabs/Sia/types"
)

var (
	// ErrEmptyFilename is returned when a filename is empty.
	ErrEmptyFilename = errors.New("filename must be a nonempty string")
	// ErrPathOverload is returned when a file already exists at that location.
	ErrPathOverload = errors.New("a file already exists at that location")
	// ErrUnknownPath is returned when no file is known with the given path.
	ErrUnknownPath = errors.New("no file known with that path")
)

// A file is a single file that has been uploaded to the network. Files are
// split into equal-length chunks, which are then erasure-coded into pieces.
// Each piece is separately encrypted, using a key derived from the file's
// master key. The pieces are uploaded to hosts in groups, such that one file
// contract covers many pieces.
type file struct {
	name        string
	size        uint64 // Static - can be accessed without lock.
	contracts   map[types.FileContractID]fileContract
	masterKey   crypto.TwofishKey    // Static - can be accessed without lock.
	erasureCode modules.ErasureCoder // Static - can be accessed without lock.
	pieceSize   uint64               // Static - can be accessed without lock.
	mode        uint32               // actually an os.FileMode
	deleted     bool                 // indicates if the file has been deleted.

	staticUID string // A UID assigned to the file when it gets created.

	mu sync.RWMutex
}

// A fileContract is a contract covering an arbitrary number of file pieces.
// Chunk/Piece metadata is used to split the raw contract data appropriately.
type fileContract struct {
	ID     types.FileContractID
	IP     modules.NetAddress
	Pieces []pieceData

	WindowStart types.BlockHeight
}

// pieceData contains the metadata necessary to request a piece from a
// fetcher.
//
// TODO: Add an 'Unavailable' flag that can be set if the host loses the piece.
// Some TODOs exist in 'repair.go' related to this field.
type pieceData struct {
	Chunk      uint64      // which chunk the piece belongs to
	Piece      uint64      // the index of the piece in the chunk
	MerkleRoot crypto.Hash // the Merkle root of the piece
}

// deriveKey derives the key used to encrypt and decrypt a specific file piece.
func deriveKey(masterKey crypto.TwofishKey, chunkIndex, pieceIndex uint64) crypto.TwofishKey {
	return crypto.TwofishKey(crypto.HashAll(masterKey, chunkIndex, pieceIndex))
}

// staticChunkSize returns the size of one chunk.
func (f *file) staticChunkSize() uint64 {
	return f.pieceSize * uint64(f.erasureCode.MinPieces())
}

// numChunks returns the number of chunks that f was split into.
func (f *file) numChunks() uint64 {
	// Empty files still need at least one chunk.
	if f.size == 0 {
		return 1
	}
	n := f.size / f.staticChunkSize()
	// The last chunk will be padded unless the chunk size divides the file
	// evenly.
	if f.size%f.staticChunkSize() != 0 {
		n++
	}
	return n
}

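// Illustrative chunk arithmetic (hypothetical numbers, not taken from this
// file): with pieceSize = 4 MiB and an erasure code whose MinPieces() is 10,
//
//	staticChunkSize() = 4 MiB * 10                 = 40 MiB
//	numChunks()       = ceil(100 MiB / 40 MiB)     = 3   // for a 100 MiB file
//
// and the third chunk is padded. Each piece of each chunk gets its own
// encryption key; for example, piece 5 of chunk 2 would be encrypted with
// deriveKey(f.masterKey, 2, 5).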
// available indicates whether the file is ready to be downloaded.
func (f *file) available(offline map[types.FileContractID]bool) bool {
	chunkPieces := make([]int, f.numChunks())
	for _, fc := range f.contracts {
		if offline[fc.ID] {
			continue
		}
		for _, p := range fc.Pieces {
			chunkPieces[p.Chunk]++
		}
	}
	for _, n := range chunkPieces {
		if n < f.erasureCode.MinPieces() {
			return false
		}
	}
	return true
}

// uploadedBytes indicates how many bytes of the file have been uploaded via
// current file contracts. Note that this includes padding and redundancy, so
// uploadedBytes can return a value much larger than the file's original filesize.
func (f *file) uploadedBytes() uint64 {
	var uploaded uint64
	for _, fc := range f.contracts {
		// Note: we need to multiply by SectorSize here instead of
		// f.pieceSize because the actual bytes uploaded include overhead
		// from Twofish encryption.
		uploaded += uint64(len(fc.Pieces)) * modules.SectorSize
	}
	return uploaded
}

// uploadProgress indicates what percentage of the file (plus redundancy) has
// been uploaded. Note that a file may be Available long before UploadProgress
// reaches 100%; the reported value is capped at 100%.
func (f *file) uploadProgress() float64 {
	uploaded := f.uploadedBytes()
	desired := modules.SectorSize * uint64(f.erasureCode.NumPieces()) * f.numChunks()

	return math.Min(100*(float64(uploaded)/float64(desired)), 100)
}

// redundancy returns the redundancy of the least redundant chunk. A file
// becomes available when this redundancy is >= 1. Assumes that every piece is
// unique within a file contract. -1 is returned if the file has size 0. It
// takes two arguments: a map of the file's offline contracts and a map of its
// goodForRenew contracts.
func (f *file) redundancy(offlineMap map[types.FileContractID]bool, goodForRenewMap map[types.FileContractID]bool) float64 {
	if f.size == 0 {
		return -1
	}
	piecesPerChunk := make([]int, f.numChunks())
	piecesPerChunkNoRenew := make([]int, f.numChunks())
	// If the file has non-0 size then the number of chunks should also be
	// non-0. Therefore the f.size == 0 conditional block above must appear
	// before this check.
	if len(piecesPerChunk) == 0 {
		build.Critical("cannot get redundancy of a file with 0 chunks")
		return -1
	}
	// pieceRenewMap stores each encountered piece and a boolean to indicate if
	// that piece was already encountered on a goodForRenew contract.
	pieceRenewMap := make(map[string]bool)
	for _, fc := range f.contracts {
		offline, exists1 := offlineMap[fc.ID]
		goodForRenew, exists2 := goodForRenewMap[fc.ID]
		if exists1 != exists2 {
			build.Critical("contract can't be in one map but not in the other")
		}
		if !exists1 {
			continue
		}

		// Do not count pieces from the contract if the contract is offline.
		if offline {
			continue
		}
		for _, p := range fc.Pieces {
			pieceKey := fmt.Sprintf("%v/%v", p.Chunk, p.Piece)
			// If the piece was already seen, check whether it was previously
			// encountered on a goodForRenew contract. If it wasn't, increment
			// the piecesPerChunk counter and set the pieceKey entry to true.
			// Otherwise just ignore the piece.
			if gfr, redundant := pieceRenewMap[pieceKey]; redundant && gfr {
				continue
			} else if redundant && !gfr {
				pieceRenewMap[pieceKey] = true
				piecesPerChunk[p.Chunk]++
				continue
			}
			pieceRenewMap[pieceKey] = goodForRenew

			// If the contract is goodForRenew, increment the entry in both
			// maps. If not, only the one in piecesPerChunkNoRenew.
			if goodForRenew {
				piecesPerChunk[p.Chunk]++
			}
			piecesPerChunkNoRenew[p.Chunk]++
		}
	}
	// Find the chunk with the least finished pieces counting only pieces of
	// contracts that are goodForRenew.
	minPieces := piecesPerChunk[0]
	for _, numPieces := range piecesPerChunk {
		if numPieces < minPieces {
			minPieces = numPieces
		}
	}
	// Find the chunk with the least finished pieces including pieces from
	// contracts that are not good for renewal.
	minPiecesNoRenew := piecesPerChunkNoRenew[0]
	for _, numPieces := range piecesPerChunkNoRenew {
		if numPieces < minPiecesNoRenew {
			minPiecesNoRenew = numPieces
		}
	}
	// If the redundancy is smaller than 1x we return the redundancy that
	// includes contracts that are not good for renewal. The reason for this is
	// a better user experience. If the renter operates correctly, redundancy
	// should never go above numPieces / minPieces and redundancyNoRenew should
	// never go below 1.
	redundancy := float64(minPieces) / float64(f.erasureCode.MinPieces())
	redundancyNoRenew := float64(minPiecesNoRenew) / float64(f.erasureCode.MinPieces())
	if redundancy < 1 {
		return redundancyNoRenew
	}
	return redundancy
}

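// Worked example for the redundancy calculation (hypothetical numbers): with
// a 10-of-30 erasure code (MinPieces() == 10), a file whose worst chunk has
// 25 distinct pieces on online, goodForRenew contracts reports 25/10 = 2.5x
// redundancy. If only 8 of the worst chunk's pieces sit on goodForRenew
// contracts but 12 are held by online contracts overall, the goodForRenew
// redundancy would be 0.8x, so the method falls back to the inclusive count
// and reports 12/10 = 1.2x instead. available() is the related boolean check:
// it requires every chunk to have at least MinPieces() pieces on contracts
// that are not offline.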
// expiration returns the lowest height at which any of the file's contracts
// will expire.
func (f *file) expiration() types.BlockHeight {
	if len(f.contracts) == 0 {
		return 0
	}
	lowest := ^types.BlockHeight(0)
	for _, fc := range f.contracts {
		if fc.WindowStart < lowest {
			lowest = fc.WindowStart
		}
	}
	return lowest
}

// newFile creates a new file object.
func newFile(name string, code modules.ErasureCoder, pieceSize, fileSize uint64) *file {
	return &file{
		name:        name,
		size:        fileSize,
		contracts:   make(map[types.FileContractID]fileContract),
		masterKey:   crypto.GenerateTwofishKey(),
		erasureCode: code,
		pieceSize:   pieceSize,

		staticUID: persist.RandomSuffix(),
	}
}

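// A minimal sketch of how a caller might construct a file object. The
// nickname is hypothetical, and the erasure-coder constructor (NewRSCode) and
// the piece size of one sector minus encryption overhead are assumptions
// about the rest of the package, not taken from this file:
//
//	ec, err := NewRSCode(10, 20) // 10 data pieces, 20 parity pieces
//	if err != nil {
//		return err
//	}
//	f := newFile("backups/photos", ec, modules.SectorSize-crypto.TwofishOverhead, fileSize)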
// DeleteFile removes a file entry from the renter and deletes its data from
// the hosts it is stored on.
//
// TODO: The data is not cleared from any contracts where the host is not
// immediately online.
func (r *Renter) DeleteFile(nickname string) error {
	lockID := r.mu.Lock()
	f, exists := r.files[nickname]
	if !exists {
		r.mu.Unlock(lockID)
		return ErrUnknownPath
	}
	delete(r.files, nickname)
	delete(r.persist.Tracking, nickname)

	err := persist.RemoveFile(filepath.Join(r.persistDir, f.name+ShareExtension))
	if err != nil {
		r.log.Println("WARN: couldn't remove file:", err)
	}

	r.saveSync()
	r.mu.Unlock(lockID)

	// Delete the file's associated contract data.
	f.mu.Lock()
	defer f.mu.Unlock()

	// Mark the file as deleted.
	f.deleted = true

	// TODO: delete the sectors of the file as well.

	return nil
}

// FileList returns all of the files that the renter has.
func (r *Renter) FileList() []modules.FileInfo {
	// Get all the files and their contracts.
	var files []*file
	contractIDs := make(map[types.FileContractID]struct{})
	lockID := r.mu.RLock()
	for _, f := range r.files {
		files = append(files, f)
		f.mu.RLock()
		for cid := range f.contracts {
			contractIDs[cid] = struct{}{}
		}
		f.mu.RUnlock()
	}
	r.mu.RUnlock(lockID)

	// Build 2 maps that map every contract id to its offline and goodForRenew
	// status.
	goodForRenew := make(map[types.FileContractID]bool)
	offline := make(map[types.FileContractID]bool)
	for cid := range contractIDs {
		resolvedKey := r.hostContractor.ResolveIDToPubKey(cid)
		cu, ok := r.hostContractor.ContractUtility(resolvedKey)
		if !ok {
			continue
		}
		goodForRenew[cid] = ok && cu.GoodForRenew
		offline[cid] = r.hostContractor.IsOffline(resolvedKey)
	}

	// Build the list of FileInfos.
	fileList := []modules.FileInfo{}
	for _, f := range files {
		lockID := r.mu.RLock()
		f.mu.RLock()
		renewing := true
		var localPath string
		tf, exists := r.persist.Tracking[f.name]
		if exists {
			localPath = tf.RepairPath
		}
		fileList = append(fileList, modules.FileInfo{
			SiaPath:        f.name,
			LocalPath:      localPath,
			Filesize:       f.size,
			Renewing:       renewing,
			Available:      f.available(offline),
			Redundancy:     f.redundancy(offline, goodForRenew),
			UploadedBytes:  f.uploadedBytes(),
			UploadProgress: f.uploadProgress(),
			Expiration:     f.expiration(),
		})
		f.mu.RUnlock()
		r.mu.RUnlock(lockID)
	}
	return fileList
}

// File returns the file with the given siaPath. Its logic mirrors FileList
// and should be kept in sync with it.
func (r *Renter) File(siaPath string) (modules.FileInfo, error) {
	var fileInfo modules.FileInfo

	// Get the file and its contracts.
	contractIDs := make(map[types.FileContractID]struct{})
	lockID := r.mu.RLock()
	defer r.mu.RUnlock(lockID)
	file, exists := r.files[siaPath]
	if !exists {
		return fileInfo, ErrUnknownPath
	}
	file.mu.RLock()
	defer file.mu.RUnlock()
	for cid := range file.contracts {
		contractIDs[cid] = struct{}{}
	}

	// Build 2 maps that map every contract id to its offline and goodForRenew
	// status.
	goodForRenew := make(map[types.FileContractID]bool)
	offline := make(map[types.FileContractID]bool)
	for cid := range contractIDs {
		resolvedKey := r.hostContractor.ResolveIDToPubKey(cid)
		cu, ok := r.hostContractor.ContractUtility(resolvedKey)
		if !ok {
			continue
		}
		goodForRenew[cid] = ok && cu.GoodForRenew
		offline[cid] = r.hostContractor.IsOffline(resolvedKey)
	}

	// Build the FileInfo.
	renewing := true
	var localPath string
	tf, exists := r.persist.Tracking[file.name]
	if exists {
		localPath = tf.RepairPath
	}
	fileInfo = modules.FileInfo{
		SiaPath:        file.name,
		LocalPath:      localPath,
		Filesize:       file.size,
		Renewing:       renewing,
		Available:      file.available(offline),
		Redundancy:     file.redundancy(offline, goodForRenew),
		UploadedBytes:  file.uploadedBytes(),
		UploadProgress: file.uploadProgress(),
		Expiration:     file.expiration(),
	}

	return fileInfo, nil
}

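// A usage sketch for the accessors above (the siaPath is hypothetical and
// error handling is abbreviated):
//
//	info, err := r.File("backups/photos")
//	if err != nil {
//		return err
//	}
//	fmt.Printf("%s: %.2fx redundancy, %.0f%% uploaded\n",
//		info.SiaPath, info.Redundancy, info.UploadProgress)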
// RenameFile takes an existing file and changes the nickname. The original
// file must exist, and there must not be any file that already has the
// replacement nickname.
func (r *Renter) RenameFile(currentName, newName string) error {
	lockID := r.mu.Lock()
	defer r.mu.Unlock(lockID)

	err := validateSiapath(newName)
	if err != nil {
		return err
	}

	// Check that currentName exists and newName doesn't.
	file, exists := r.files[currentName]
	if !exists {
		return ErrUnknownPath
	}
	_, exists = r.files[newName]
	if exists {
		return ErrPathOverload
	}

	// Modify the file and save it to disk.
	file.mu.Lock()
	file.name = newName
	err = r.saveFile(file)
	file.mu.Unlock()
	if err != nil {
		return err
	}

	// Update the entries in the renter.
	delete(r.files, currentName)
	r.files[newName] = file
	if t, ok := r.persist.Tracking[currentName]; ok {
		delete(r.persist.Tracking, currentName)
		r.persist.Tracking[newName] = t
	}
	err = r.saveSync()
	if err != nil {
		return err
	}

	// Delete the old .sia file.
	oldPath := filepath.Join(r.persistDir, currentName+ShareExtension)
	return os.RemoveAll(oldPath)
}
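// A short usage sketch for RenameFile (both nicknames are hypothetical):
//
//	if err := r.RenameFile("backups/photos", "backups/photos-2018"); err != nil {
//		return err
//	}
//
// On success the in-memory entry, the tracking entry, and the on-disk .sia
// metadata all follow the new nickname.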