github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/cache/object.go

// +build !plan9

package cache

import (
	"context"
	"io"
	"path"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/readers"
)

const (
	objectInCache       = "Object"
	objectPendingUpload = "TempObject"
)

// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"`

	ParentFs      fs.Fs     `json:"-"`        // parent fs
	CacheFs       *Fs       `json:"-"`        // cache fs
	Name          string    `json:"name"`     // name of the object
	Dir           string    `json:"dir"`      // abs path of the object's parent directory
	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
	CacheSize     int64     `json:"size"`     // size of the object or -1 if unknown
	CacheStorable bool      `json:"storable"` // says whether this object can be stored
	CacheType     string    `json:"cacheType"`
	CacheTs       time.Time `json:"cacheTs"`
	cacheHashesMu sync.Mutex
	CacheHashes   map[hash.Type]string // all supported hashes cached

	refreshMutex sync.Mutex
}

// NewObject builds a cache Object placeholder from a remote path
func NewObject(f *Fs, remote string) *Object {
	fullRemote := path.Join(f.Root(), remote)
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co := &Object{
		ParentFs:      parentFs,
		CacheFs:       f,
		Name:          cleanPath(name),
		Dir:           cleanPath(dir),
		CacheModTime:  time.Now().UnixNano(),
		CacheSize:     0,
		CacheStorable: false,
		CacheType:     cacheType,
		CacheTs:       time.Now(),
	}
	return co
}

// ObjectFromOriginal builds a cache Object from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
	var co *Object
	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co = &Object{
		ParentFs:  parentFs,
		CacheFs:   f,
		Name:      cleanPath(name),
		Dir:       cleanPath(dir),
		CacheType: cacheType,
		CacheTs:   time.Now(),
	}
	co.updateData(ctx, o)
	return co
}

// updateData copies the metadata from a live source object and resets the cached hashes
func (o *Object) updateData(ctx context.Context, source fs.Object) {
	o.Object = source
	o.CacheModTime = source.ModTime(ctx).UnixNano()
	o.CacheSize = source.Size()
	o.CacheStorable = source.Storable()
	o.CacheTs = time.Now()
	o.cacheHashesMu.Lock()
	o.CacheHashes = make(map[hash.Type]string)
	o.cacheHashesMu.Unlock()
}

// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}

// String returns a human friendly name for this object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	p := path.Join(o.Dir, o.Name)
	return o.CacheFs.cleanRootFromPath(p)
}
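// Illustrative sketch (not part of the original file): the two constructors
// above cover different situations. ObjectFromOriginal is for when a live
// fs.Object from the wrapped remote is already at hand, while NewObject builds
// a placeholder from a remote path whose metadata is filled in lazily by
// refreshFromSource. A hypothetical caller inside the cache Fs might use them
// roughly like this (co, f, srcObj and the path are made up for the example):
//
//	co := ObjectFromOriginal(ctx, f, srcObj) // srcObj listed from the wrapped remote
//	co.persist()                             // store its metadata in the persistent cache
//
//	placeholder := NewObject(f, "dir/file.bin")
//	err := placeholder.refreshFromSource(ctx, true) // fetch real metadata on demand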
// abs returns the absolute path to the object
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}

// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
	_ = o.refresh(ctx)
	return time.Unix(0, o.CacheModTime)
}

// Size returns the cached Size
func (o *Object) Size() int64 {
	_ = o.refresh(context.TODO())
	return o.CacheSize
}

// Storable returns the cached Storable
func (o *Object) Storable() bool {
	_ = o.refresh(context.TODO())
	return o.CacheStorable
}

// refresh will check if the object info is expired and request the info from the source if it is.
// Both of these conditions must be true to skip a refresh:
// 1. the cache ts hasn't expired yet
// 2. the object is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
	if !isExpired && !isNotified {
		return nil
	}

	return o.refreshFromSource(ctx, true)
}

// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
	o.refreshMutex.Lock()
	defer o.refreshMutex.Unlock()
	var err error
	var liveObject fs.Object

	if o.Object != nil && !force {
		return nil
	}
	if o.isTempFile() {
		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
	} else {
		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
	}
	if err != nil {
		fs.Errorf(o, "error refreshing object: %v", err)
		return err
	}
	o.updateData(ctx, liveObject)
	o.persist()

	return nil
}

// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}

	err := o.Object.SetModTime(ctx, t)
	if err != nil {
		return err
	}

	o.CacheModTime = t.UnixNano()
	o.persist()
	fs.Debugf(o, "updated ModTime: %v", t)

	return nil
}

// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var err error

	if o.Object == nil {
		err = o.refreshFromSource(ctx, true)
	} else {
		err = o.refresh(ctx)
	}
	if err != nil {
		return nil, err
	}

	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
		_, err = cacheReader.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}

	return readers.NewLimitedReadCloser(cacheReader, limit), nil
}
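// Illustrative sketch (not part of the original file): a caller reading a byte
// range through the cached reader returned by Open. The fs.RangeOption values
// and the variable co are made up for the example; the bytes are served
// through the chunked handle from NewObjectHandle rather than straight from
// the remote.
//
//	rc, err := co.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//	if err != nil {
//		return err
//	}
//	defer func() { _ = rc.Close() }()
//	firstKiB, err := ioutil.ReadAll(rc) // first 1 KiB of the object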
// Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())

	// FIXME use reliable upload
	err := o.Object.Update(ctx, in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	// delete cached chunks and info so they get replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

	o.CacheModTime = src.ModTime(ctx).UnixNano()
	o.CacheSize = src.Size()
	o.cacheHashesMu.Lock()
	o.CacheHashes = make(map[hash.Type]string)
	o.cacheHashesMu.Unlock()
	o.CacheTs = time.Now()
	o.persist()

	return nil
}

// Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove(ctx)
	if err != nil {
		return err
	}

	fs.Debugf(o, "removing object")
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	_ = o.CacheFs.cache.removePendingUpload(o.abs())
	parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
	_ = o.CacheFs.cache.ExpireDir(parentCd)
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Hash requests a hash of the object and stores it in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	_ = o.refresh(ctx)
	o.cacheHashesMu.Lock()
	if o.CacheHashes == nil {
		o.CacheHashes = make(map[hash.Type]string)
	}
	cachedHash, found := o.CacheHashes[ht]
	o.cacheHashesMu.Unlock()
	if found {
		return cachedHash, nil
	}
	if err := o.refreshFromSource(ctx, false); err != nil {
		return "", err
	}
	liveHash, err := o.Object.Hash(ctx, ht)
	if err != nil {
		return "", err
	}
	o.cacheHashesMu.Lock()
	o.CacheHashes[ht] = liveHash
	o.cacheHashesMu.Unlock()

	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)

	return liveHash, nil
}

// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}

// isTempFile reports whether this object is queued in the temp upload path
// and updates CacheType accordingly
func (o *Object) isTempFile() bool {
	_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		o.CacheType = objectInCache
		return false
	}

	o.CacheType = objectPendingUpload
	return true
}

// tempFileStartedUpload reports whether the pending upload for this object has already started
func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		return false
	}
	return started
}
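// Note (not part of the original file): both helpers above rely on
// SearchPendingUpload, which based on its use here is assumed to return
// (started bool, err error) - an error when the path is not queued for upload
// at all, and started reporting whether the background uploader has already
// picked the entry up. Update and Remove combine the two helpers to refuse
// changes while such an upload is in flight.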
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

var (
	_ fs.Object          = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
)
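// Illustrative sketch (not part of the original file): because *Object
// satisfies fs.ObjectUnWrapper (asserted above), callers can reach the wrapped
// source object directly, e.g. to ask the remote for an MD5 rather than going
// through the cached entry (co is made up for the example):
//
//	if wrapped := co.UnWrap(); wrapped != nil {
//		sum, err := wrapped.Hash(ctx, hash.MD5)
//		if err == nil {
//			fs.Debugf(co, "source MD5: %s", sum)
//		}
//	}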