github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/cache/object.go

// +build !plan9

package cache

import (
	"context"
	"io"
	"path"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
)

const (
	objectInCache       = "Object"
	objectPendingUpload = "TempObject"
)

// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"`

	ParentFs      fs.Fs                `json:"-"`        // parent fs
	CacheFs       *Fs                  `json:"-"`        // cache fs
	Name          string               `json:"name"`     // name of the object
	Dir           string               `json:"dir"`      // abs path of the object's parent directory
	CacheModTime  int64                `json:"modTime"`  // modification or creation time - IsZero for unknown
	CacheSize     int64                `json:"size"`     // size of the object or -1 if unknown
	CacheStorable bool                 `json:"storable"` // says whether this object can be stored
	CacheType     string               `json:"cacheType"`
	CacheTs       time.Time            `json:"cacheTs"`
	CacheHashes   map[hash.Type]string // all supported hashes cached

	refreshMutex sync.Mutex
}

// NewObject builds a cache Object from the cache Fs and a remote path
func NewObject(f *Fs, remote string) *Object {
	fullRemote := path.Join(f.Root(), remote)
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co := &Object{
		ParentFs:      parentFs,
		CacheFs:       f,
		Name:          cleanPath(name),
		Dir:           cleanPath(dir),
		CacheModTime:  time.Now().UnixNano(),
		CacheSize:     0,
		CacheStorable: false,
		CacheType:     cacheType,
		CacheTs:       time.Now(),
	}
	return co
}

// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
	var co *Object
	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co = &Object{
		ParentFs:  parentFs,
		CacheFs:   f,
		Name:      cleanPath(name),
		Dir:       cleanPath(dir),
		CacheType: cacheType,
		CacheTs:   time.Now(),
	}
	co.updateData(ctx, o)
	return co
}

func (o *Object) updateData(ctx context.Context, source fs.Object) {
	o.Object = source
	o.CacheModTime = source.ModTime(ctx).UnixNano()
	o.CacheSize = source.Size()
	o.CacheStorable = source.Storable()
	o.CacheTs = time.Now()
	o.CacheHashes = make(map[hash.Type]string)
}

// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}

// String returns a human friendly name for this object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	p := path.Join(o.Dir, o.Name)
	return o.CacheFs.cleanRootFromPath(p)
}

// abs returns the absolute path to the object
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}

// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
	_ = o.refresh(ctx)
	return time.Unix(0, o.CacheModTime)
}

// Size returns the cached Size
func (o *Object) Size() int64 {
	_ = o.refresh(context.TODO())
	return o.CacheSize
}

// Storable returns the cached Storable
func (o *Object) Storable() bool {
	_ = o.refresh(context.TODO())
	return o.CacheStorable
}

// refresh checks whether the object info has expired and requests it from the source if so.
// Both of these conditions must hold to skip a refresh:
// 1. the cache ts hasn't expired yet
// 2. no change notification for this remote is pending from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
	if !isExpired && !isNotified {
		return nil
	}

	return o.refreshFromSource(ctx, true)
}

// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
	o.refreshMutex.Lock()
	defer o.refreshMutex.Unlock()
	var err error
	var liveObject fs.Object

	if o.Object != nil && !force {
		return nil
	}
	if o.isTempFile() {
		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
	} else {
		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
	}
	if err != nil {
		fs.Errorf(o, "error refreshing object: %v", err)
		return err
	}
	o.updateData(ctx, liveObject)
	o.persist()

	return nil
}

// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}

	err := o.Object.SetModTime(ctx, t)
	if err != nil {
		return err
	}

	o.CacheModTime = t.UnixNano()
	o.persist()
	fs.Debugf(o, "updated ModTime: %v", t)

	return nil
}

// Open opens the object for reading; a specific part of the file can be
// requested using fs.SeekOption or fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var err error

	if o.Object == nil {
		err = o.refreshFromSource(ctx, true)
	} else {
		err = o.refresh(ctx)
	}
	if err != nil {
		return nil, err
	}

	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
		_, err = cacheReader.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}

	return readers.NewLimitedReadCloser(cacheReader, limit), nil
}

// Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())

	// FIXME use reliable upload
	err := o.Object.Update(ctx, in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	// deleting cached chunks and info to be replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

	o.CacheModTime = src.ModTime(ctx).UnixNano()
	o.CacheSize = src.Size()
	o.CacheHashes = make(map[hash.Type]string)
	o.CacheTs = time.Now()
	o.persist()

	return nil
}

// Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove(ctx)
	if err != nil {
		return err
	}

	fs.Debugf(o, "removing object")
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	_ = o.CacheFs.cache.removePendingUpload(o.abs())
	parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
	_ = o.CacheFs.cache.ExpireDir(parentCd)
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Hash requests a hash of the object and stores it in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	_ = o.refresh(ctx)
	if o.CacheHashes == nil {
		o.CacheHashes = make(map[hash.Type]string)
	}

	cachedHash, found := o.CacheHashes[ht]
	if found {
		return cachedHash, nil
	}
	if err := o.refreshFromSource(ctx, false); err != nil {
		return "", err
	}
	liveHash, err := o.Object.Hash(ctx, ht)
	if err != nil {
		return "", err
	}
	o.CacheHashes[ht] = liveHash

	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)

	return liveHash, nil
}

// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}

func (o *Object) isTempFile() bool {
	_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		o.CacheType = objectInCache
		return false
	}

	o.CacheType = objectPendingUpload
	return true
}

func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		return false
	}
	return started
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

var (
	_ fs.Object          = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
)