package renter

import (
	"errors"
	"math"
	"os"
	"path/filepath"
	"sync"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
)

var (
	// ErrUnknownPath is returned when an operation references a nickname
	// that has no corresponding file entry in the renter.
	ErrUnknownPath = errors.New("no file known with that path")
	// ErrPathOverload is returned when an operation would assign a nickname
	// that is already in use by another file.
	ErrPathOverload = errors.New("a file already exists at that location")
)

// A file is a single file that has been uploaded to the network. Files are
// split into equal-length chunks, which are then erasure-coded into pieces.
// Each piece is separately encrypted, using a key derived from the file's
// master key. The pieces are uploaded to hosts in groups, such that one file
// contract covers many pieces.
type file struct {
	name        string
	size        uint64 // length of the original file data, in bytes
	contracts   map[types.FileContractID]fileContract
	masterKey   crypto.TwofishKey    // seed for deriving per-piece encryption keys (see deriveKey)
	erasureCode modules.ErasureCoder // codec that splits chunks into redundant pieces
	pieceSize   uint64               // size of a single erasure-coded piece, in bytes
	mode        uint32               // actually an os.FileMode
	mu          sync.RWMutex         // guards mutable state, most notably contracts
}

// A fileContract is a contract covering an arbitrary number of file pieces.
// Chunk/Piece metadata is used to split the raw contract data appropriately.
type fileContract struct {
	ID     types.FileContractID
	IP     modules.NetAddress
	Pieces []pieceData

	// WindowStart is the height at which the contract's proof window opens,
	// i.e. the height at which the contract effectively expires.
	WindowStart types.BlockHeight
}

// pieceData contains the metadata necessary to request a piece from a
// fetcher.
type pieceData struct {
	Chunk      uint64      // which chunk the piece belongs to
	Piece      uint64      // the index of the piece in the chunk
	MerkleRoot crypto.Hash // the Merkle root of the piece
}

// deriveKey derives the key used to encrypt and decrypt a specific file piece.
56 func deriveKey(masterKey crypto.TwofishKey, chunkIndex, pieceIndex uint64) crypto.TwofishKey { 57 return crypto.TwofishKey(crypto.HashAll(masterKey, chunkIndex, pieceIndex)) 58 } 59 60 // chunkSize returns the size of one chunk. 61 func (f *file) chunkSize() uint64 { 62 return f.pieceSize * uint64(f.erasureCode.MinPieces()) 63 } 64 65 // numChunks returns the number of chunks that f was split into. 66 func (f *file) numChunks() uint64 { 67 // empty files still need at least one chunk 68 if f.size == 0 { 69 return 1 70 } 71 n := f.size / f.chunkSize() 72 // last chunk will be padded, unless chunkSize divides file evenly. 73 if f.size%f.chunkSize() != 0 { 74 n++ 75 } 76 return n 77 } 78 79 // available indicates whether the file is ready to be downloaded. 80 func (f *file) available() bool { 81 f.mu.RLock() 82 defer f.mu.RUnlock() 83 chunkPieces := make([]int, f.numChunks()) 84 for _, fc := range f.contracts { 85 for _, p := range fc.Pieces { 86 chunkPieces[p.Chunk]++ 87 } 88 } 89 for _, n := range chunkPieces { 90 if n < f.erasureCode.MinPieces() { 91 return false 92 } 93 } 94 return true 95 } 96 97 // uploadProgress indicates what percentage of the file (plus redundancy) has 98 // been uploaded. Note that a file may be Available long before UploadProgress 99 // reaches 100%, and UploadProgress may report a value greater than 100%. 100 func (f *file) uploadProgress() float64 { 101 f.mu.RLock() 102 defer f.mu.RUnlock() 103 var uploaded uint64 104 for _, fc := range f.contracts { 105 uploaded += uint64(len(fc.Pieces)) * f.pieceSize 106 } 107 desired := f.pieceSize * uint64(f.erasureCode.NumPieces()) * f.numChunks() 108 109 return 100 * (float64(uploaded) / float64(desired)) 110 } 111 112 // redundancy returns the redundancy of the least redundant chunk. A file 113 // becomes available when this redundancy is >= 1. Assumes that every piece is 114 // unique within a file contract. -1 is returned if the file has size 0. 
115 func (f *file) redundancy() float64 { 116 if f.size == 0 { 117 return math.NaN() 118 } 119 piecesPerChunk := make([]int, f.numChunks()) 120 // If the file has non-0 size then the number of chunks should also be 121 // non-0. Therefore the f.size == 0 conditional block above must appear 122 // before this check. 123 if len(piecesPerChunk) == 0 { 124 build.Critical("cannot get redundancy of a file with 0 chunks") 125 return math.NaN() 126 } 127 for _, fc := range f.contracts { 128 for _, p := range fc.Pieces { 129 piecesPerChunk[p.Chunk]++ 130 } 131 } 132 minPieces := piecesPerChunk[0] 133 for _, numPieces := range piecesPerChunk { 134 if numPieces < minPieces { 135 minPieces = numPieces 136 } 137 } 138 return float64(minPieces) / float64(f.erasureCode.MinPieces()) 139 } 140 141 // expiration returns the lowest height at which any of the file's contracts 142 // will expire. 143 func (f *file) expiration() types.BlockHeight { 144 f.mu.RLock() 145 defer f.mu.RUnlock() 146 if len(f.contracts) == 0 { 147 return 0 148 } 149 lowest := ^types.BlockHeight(0) 150 for _, fc := range f.contracts { 151 if fc.WindowStart < lowest { 152 lowest = fc.WindowStart 153 } 154 } 155 return lowest 156 } 157 158 // newFile creates a new file object. 159 func newFile(name string, code modules.ErasureCoder, pieceSize, fileSize uint64) *file { 160 key, _ := crypto.GenerateTwofishKey() 161 return &file{ 162 name: name, 163 size: fileSize, 164 contracts: make(map[types.FileContractID]fileContract), 165 masterKey: key, 166 erasureCode: code, 167 pieceSize: pieceSize, 168 } 169 } 170 171 // DeleteFile removes a file entry from the renter and deletes its data from 172 // the hosts it is stored on. 
func (r *Renter) DeleteFile(nickname string) error {
	// Remove the file from the renter's in-memory set and persist the change
	// before doing any (slow) network work.
	lockID := r.mu.Lock()
	f, exists := r.files[nickname]
	if !exists {
		r.mu.Unlock(lockID)
		return ErrUnknownPath
	}
	delete(r.files, nickname)
	// NOTE(review): errors from RemoveAll and saveSync are discarded here, so
	// the deletion proceeds best-effort even if persistence fails — confirm
	// this is intended.
	os.RemoveAll(filepath.Join(r.persistDir, f.name+ShareExtension))
	r.saveSync()
	r.mu.Unlock(lockID)

	// delete the file's associated contract data.
	// The renter lock is released first; only the file's own lock is held
	// during the host interactions below.
	f.mu.Lock()
	defer f.mu.Unlock()

	// TODO: this is ugly because we only have the Contracts method for
	// looking up contracts.
	var contracts []modules.RenterContract
	for _, c := range r.hostContractor.Contracts() {
		if _, ok := f.contracts[c.ID]; ok {
			contracts = append(contracts, c)
		}
	}
	for _, c := range contracts {
		editor, err := r.hostContractor.Editor(c)
		if err != nil {
			// TODO: what if the host isn't online?
			continue
		}
		// Ask the host to drop every sector root covered by this contract.
		// NOTE(review): Delete errors are ignored, and the editor does not
		// appear to be closed — verify whether Editor requires a Close call.
		for _, root := range c.MerkleRoots {
			editor.Delete(root)
		}
		delete(f.contracts, c.ID)
	}

	return nil
}

// FileList returns all of the files that the renter has.
func (r *Renter) FileList() []modules.FileInfo {
	lockID := r.mu.RLock()
	defer r.mu.RUnlock(lockID)

	files := make([]modules.FileInfo, 0, len(r.files))
	for _, f := range r.files {
		// _, renewing := r.tracking[f.name]
		// TODO: get renewing working again
		renewing := false
		// Each accessor below takes the file's own read lock internally.
		files = append(files, modules.FileInfo{
			SiaPath:        f.name,
			Filesize:       f.size,
			Available:      f.available(),
			Redundancy:     f.redundancy(),
			Renewing:       renewing,
			UploadProgress: f.uploadProgress(),
			Expiration:     f.expiration(),
		})
	}
	return files
}

// RenameFile takes an existing file and changes the nickname. The original
// file must exist, and there must not be any file that already has the
// replacement nickname.
238 func (r *Renter) RenameFile(currentName, newName string) error { 239 lockID := r.mu.Lock() 240 defer r.mu.Unlock(lockID) 241 242 // Check that currentName exists and newName doesn't. 243 file, exists := r.files[currentName] 244 if !exists { 245 return ErrUnknownPath 246 } 247 _, exists = r.files[newName] 248 if exists { 249 return ErrPathOverload 250 } 251 252 // Modify the file and save it to disk. 253 file.mu.Lock() 254 file.name = newName 255 err := r.saveFile(file) 256 file.mu.Unlock() 257 if err != nil { 258 return err 259 } 260 261 // Update the entries in the renter. 262 delete(r.files, currentName) 263 r.files[newName] = file 264 err = r.saveSync() 265 if err != nil { 266 return err 267 } 268 269 // Delete the old .sia file. 270 // NOTE: proper error handling is difficult here. For example, if the 271 // removal fails, should the entry in r.files be preserved? For now we will 272 // keep things simple, but it is important that our approach feels 273 // intuitive/unsurprising and doesn't put the user's data at risk. 274 oldPath := filepath.Join(r.persistDir, currentName+ShareExtension) 275 return os.RemoveAll(oldPath) 276 }