github.com/artpar/rclone@v1.67.3/backend/cache/storage_persistent.go (about) 1 //go:build !plan9 && !js 2 3 package cache 4 5 import ( 6 "bytes" 7 "context" 8 "encoding/binary" 9 "encoding/json" 10 "fmt" 11 "os" 12 "path" 13 "strconv" 14 "strings" 15 "sync" 16 "time" 17 18 "github.com/artpar/rclone/fs" 19 "github.com/artpar/rclone/fs/walk" 20 bolt "go.etcd.io/bbolt" 21 ) 22 23 // Constants 24 const ( 25 RootBucket = "root" 26 RootTsBucket = "rootTs" 27 DataTsBucket = "dataTs" 28 tempBucket = "pending" 29 ) 30 31 // Features flags for this storage type 32 type Features struct { 33 PurgeDb bool // purge the db before starting 34 DbWaitTime time.Duration // time to wait for DB to be available 35 } 36 37 var boltMap = make(map[string]*Persistent) 38 var boltMapMx sync.Mutex 39 40 // GetPersistent returns a single instance for the specific store 41 func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) { 42 // write lock to create one 43 boltMapMx.Lock() 44 defer boltMapMx.Unlock() 45 if b, ok := boltMap[dbPath]; ok { 46 if !b.open { 47 err := b.connect() 48 if err != nil { 49 return nil, err 50 } 51 } 52 return b, nil 53 } 54 55 bb, err := newPersistent(dbPath, chunkPath, f) 56 if err != nil { 57 return nil, err 58 } 59 boltMap[dbPath] = bb 60 return boltMap[dbPath], nil 61 } 62 63 type chunkInfo struct { 64 Path string 65 Offset int64 66 Size int64 67 } 68 69 type tempUploadInfo struct { 70 DestPath string 71 AddedOn time.Time 72 Started bool 73 } 74 75 // String representation of a tempUploadInfo 76 func (t *tempUploadInfo) String() string { 77 return fmt.Sprintf("%v - %v (%v)", t.DestPath, t.Started, t.AddedOn) 78 } 79 80 // Persistent is a wrapper of persistent storage for a bolt.DB file 81 type Persistent struct { 82 dbPath string 83 dataPath string 84 open bool 85 db *bolt.DB 86 cleanupMux sync.Mutex 87 tempQueueMux sync.Mutex 88 features *Features 89 } 90 91 // newPersistent builds a new wrapper and connects to the bolt.DB file 92 func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) { 93 b := &Persistent{ 94 dbPath: dbPath, 95 dataPath: chunkPath, 96 features: f, 97 } 98 99 err := b.connect() 100 if err != nil { 101 fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err) 102 return nil, err 103 } 104 105 return b, nil 106 } 107 108 // String will return a human friendly string for this DB (currently the dbPath) 109 func (b *Persistent) String() string { 110 return "<Cache DB> " + b.dbPath 111 } 112 113 // connect creates a connection to the configured file 114 // refreshDb will delete the file before to create an empty DB if it's set to true 115 func (b *Persistent) connect() error { 116 var err error 117 118 err = os.MkdirAll(b.dataPath, os.ModePerm) 119 if err != nil { 120 return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err) 121 } 122 b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime}) 123 if err != nil { 124 return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err) 125 } 126 if b.features.PurgeDb { 127 b.Purge() 128 } 129 _ = b.db.Update(func(tx *bolt.Tx) error { 130 _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) 131 _, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket)) 132 _, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket)) 133 _, _ = tx.CreateBucketIfNotExists([]byte(tempBucket)) 134 135 return nil 136 }) 137 138 b.open = true 139 return nil 140 } 141 142 // getBucket prepares and cleans a specific path of the form: /var/tmp and will iterate through each path component 143 // to get to the nested bucket of the final part (in this example: tmp) 144 func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *bolt.Bucket { 145 cleanPath(dir) 146 147 entries := strings.FieldsFunc(dir, func(c rune) bool { 148 // cover Windows where rclone still uses '/' as path separator 149 // this should be safe as '/' is not a valid Windows character 150 return (os.PathSeparator == c || c == rune('/')) 151 }) 152 bucket := tx.Bucket([]byte(RootBucket)) 153 154 for _, entry := range entries { 155 if createIfMissing { 156 bucket, _ = bucket.CreateBucketIfNotExists([]byte(entry)) 157 } else { 158 bucket = bucket.Bucket([]byte(entry)) 159 } 160 161 if bucket == nil { 162 return nil 163 } 164 } 165 166 return bucket 167 } 168 169 // GetDir will retrieve data of a cached directory 170 func (b *Persistent) GetDir(remote string) (*Directory, error) { 171 cd := &Directory{} 172 173 err := b.db.View(func(tx *bolt.Tx) error { 174 bucket := b.getBucket(remote, false, tx) 175 if bucket == nil { 176 return fmt.Errorf("couldn't open bucket (%v)", remote) 177 } 178 179 data := bucket.Get([]byte(".")) 180 if data != nil { 181 return json.Unmarshal(data, cd) 182 } 183 184 return fmt.Errorf("%v not found", remote) 185 }) 186 187 return cd, err 188 } 189 190 // AddDir will update a CachedDirectory metadata and all its entries 191 func (b *Persistent) AddDir(cachedDir *Directory) error { 192 return b.AddBatchDir([]*Directory{cachedDir}) 193 } 194 195 // AddBatchDir will update a list of CachedDirectory metadata and all their entries 196 func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error { 197 if len(cachedDirs) == 0 { 198 return nil 199 } 200 201 return b.db.Update(func(tx *bolt.Tx) error { 202 var bucket *bolt.Bucket 203 if cachedDirs[0].Dir == "" { 204 bucket = tx.Bucket([]byte(RootBucket)) 205 } else { 206 bucket = b.getBucket(cachedDirs[0].Dir, true, tx) 207 } 208 if bucket == nil { 209 return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir) 210 } 211 212 for _, cachedDir := range cachedDirs { 213 var b *bolt.Bucket 214 var err error 215 if cachedDir.Name == "" { 216 b = bucket 217 } else { 218 b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name)) 219 } 220 if err != nil { 221 return err 222 } 223 224 encoded, err := json.Marshal(cachedDir) 225 if err != nil { 226 return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err) 227 } 228 err = b.Put([]byte("."), encoded) 229 if err != nil { 230 return err 231 } 232 } 233 return nil 234 }) 235 } 236 237 // GetDirEntries will return a CachedDirectory, its list of dir entries and/or an error if it encountered issues 238 func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) { 239 var dirEntries fs.DirEntries 240 241 err := b.db.View(func(tx *bolt.Tx) error { 242 bucket := b.getBucket(cachedDir.abs(), false, tx) 243 if bucket == nil { 244 return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs()) 245 } 246 247 val := bucket.Get([]byte(".")) 248 if val != nil { 249 err := json.Unmarshal(val, cachedDir) 250 if err != nil { 251 return fmt.Errorf("error during unmarshalling obj: %w", err) 252 } 253 } else { 254 return fmt.Errorf("missing cached dir: %v", cachedDir) 255 } 256 257 c := bucket.Cursor() 258 for k, v := c.First(); k != nil; k, v = c.Next() { 259 // ignore metadata key: . 260 if bytes.Equal(k, []byte(".")) { 261 continue 262 } 263 entryPath := path.Join(cachedDir.Remote(), string(k)) 264 265 if v == nil { // directory 266 // we try to find a cached meta for the dir 267 currentBucket := c.Bucket().Bucket(k) 268 if currentBucket == nil { 269 return fmt.Errorf("couldn't open bucket (%v)", string(k)) 270 } 271 272 metaKey := currentBucket.Get([]byte(".")) 273 d := NewDirectory(cachedDir.CacheFs, entryPath) 274 if metaKey != nil { //if we don't find it, we create an empty dir 275 err := json.Unmarshal(metaKey, d) 276 if err != nil { // if even this fails, we fallback to an empty dir 277 fs.Debugf(string(k), "error during unmarshalling obj: %v", err) 278 } 279 } 280 281 dirEntries = append(dirEntries, d) 282 } else { // object 283 o := NewObject(cachedDir.CacheFs, entryPath) 284 err := json.Unmarshal(v, o) 285 if err != nil { 286 fs.Debugf(string(k), "error during unmarshalling obj: %v", err) 287 continue 288 } 289 290 dirEntries = append(dirEntries, o) 291 } 292 } 293 294 return nil 295 }) 296 297 return dirEntries, err 298 } 299 300 // RemoveDir will delete a CachedDirectory, all its objects and all the chunks stored for it 301 func (b *Persistent) RemoveDir(fp string) error { 302 var err error 303 parentDir, dirName := path.Split(fp) 304 if fp == "" { 305 err = b.db.Update(func(tx *bolt.Tx) error { 306 err := tx.DeleteBucket([]byte(RootBucket)) 307 if err != nil { 308 fs.Debugf(fp, "couldn't delete from cache: %v", err) 309 return err 310 } 311 _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) 312 return nil 313 }) 314 } else { 315 err = b.db.Update(func(tx *bolt.Tx) error { 316 bucket := b.getBucket(cleanPath(parentDir), false, tx) 317 if bucket == nil { 318 return fmt.Errorf("couldn't open bucket (%v)", fp) 319 } 320 // delete the cached dir 321 err := bucket.DeleteBucket([]byte(cleanPath(dirName))) 322 if err != nil { 323 fs.Debugf(fp, "couldn't delete from cache: %v", err) 324 } 325 return nil 326 }) 327 } 328 329 // delete chunks on disk 330 // safe to ignore as the files might not have been open 331 if err == nil { 332 _ = os.RemoveAll(path.Join(b.dataPath, fp)) 333 _ = os.MkdirAll(b.dataPath, os.ModePerm) 334 } 335 336 return err 337 } 338 339 // ExpireDir will flush a CachedDirectory and all its objects from the objects 340 // chunks will remain as they are 341 func (b *Persistent) ExpireDir(cd *Directory) error { 342 t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge)) 343 cd.CacheTs = &t 344 345 // expire all parents 346 return b.db.Update(func(tx *bolt.Tx) error { 347 // expire all the parents 348 currentDir := cd.abs() 349 for { // until we get to the root 350 bucket := b.getBucket(currentDir, false, tx) 351 if bucket != nil { 352 val := bucket.Get([]byte(".")) 353 if val != nil { 354 cd2 := &Directory{CacheFs: cd.CacheFs} 355 err := json.Unmarshal(val, cd2) 356 if err == nil { 357 fs.Debugf(cd, "cache: expired %v", currentDir) 358 cd2.CacheTs = &t 359 enc2, _ := json.Marshal(cd2) 360 _ = bucket.Put([]byte("."), enc2) 361 } 362 } 363 } 364 if currentDir == "" { 365 break 366 } 367 currentDir = cleanPath(path.Dir(currentDir)) 368 } 369 return nil 370 }) 371 } 372 373 // GetObject will return a CachedObject from its parent directory or an error if it doesn't find it 374 func (b *Persistent) GetObject(cachedObject *Object) (err error) { 375 return b.db.View(func(tx *bolt.Tx) error { 376 bucket := b.getBucket(cachedObject.Dir, false, tx) 377 if bucket == nil { 378 return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir) 379 } 380 val := bucket.Get([]byte(cachedObject.Name)) 381 if val != nil { 382 return json.Unmarshal(val, cachedObject) 383 } 384 return fmt.Errorf("couldn't find object (%v)", cachedObject.Name) 385 }) 386 } 387 388 // AddObject will create a cached object in its parent directory 389 func (b *Persistent) AddObject(cachedObject *Object) error { 390 return b.db.Update(func(tx *bolt.Tx) error { 391 bucket := b.getBucket(cachedObject.Dir, true, tx) 392 if bucket == nil { 393 return fmt.Errorf("couldn't open parent bucket for %v", cachedObject) 394 } 395 // cache Object Info 396 encoded, err := json.Marshal(cachedObject) 397 if err != nil { 398 return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err) 399 } 400 err = bucket.Put([]byte(cachedObject.Name), encoded) 401 if err != nil { 402 return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err) 403 } 404 return nil 405 }) 406 } 407 408 // RemoveObject will delete a single cached object and all the chunks which belong to it 409 func (b *Persistent) RemoveObject(fp string) error { 410 parentDir, objName := path.Split(fp) 411 return b.db.Update(func(tx *bolt.Tx) error { 412 bucket := b.getBucket(cleanPath(parentDir), false, tx) 413 if bucket == nil { 414 return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir)) 415 } 416 err := bucket.Delete([]byte(cleanPath(objName))) 417 if err != nil { 418 fs.Debugf(fp, "couldn't delete obj from storage: %v", err) 419 } 420 // delete chunks on disk 421 // safe to ignore as the file might not have been open 422 _ = os.RemoveAll(path.Join(b.dataPath, fp)) 423 return nil 424 }) 425 } 426 427 // ExpireObject will flush an Object and all its data if desired 428 func (b *Persistent) ExpireObject(co *Object, withData bool) error { 429 co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge)) 430 err := b.AddObject(co) 431 if withData { 432 _ = os.RemoveAll(path.Join(b.dataPath, co.abs())) 433 } 434 return err 435 } 436 437 // HasEntry confirms the existence of a single entry (dir or object) 438 func (b *Persistent) HasEntry(remote string) bool { 439 dir, name := path.Split(remote) 440 dir = cleanPath(dir) 441 name = cleanPath(name) 442 443 err := b.db.View(func(tx *bolt.Tx) error { 444 bucket := b.getBucket(dir, false, tx) 445 if bucket == nil { 446 return fmt.Errorf("couldn't open parent bucket for %v", remote) 447 } 448 if f := bucket.Bucket([]byte(name)); f != nil { 449 return nil 450 } 451 if f := bucket.Get([]byte(name)); f != nil { 452 return nil 453 } 454 455 return fmt.Errorf("couldn't find object (%v)", remote) 456 }) 457 return err == nil 458 } 459 460 // HasChunk confirms the existence of a single chunk of an object 461 func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool { 462 fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) 463 if _, err := os.Stat(fp); !os.IsNotExist(err) { 464 return true 465 } 466 return false 467 } 468 469 // GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it 470 func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) { 471 var data []byte 472 473 fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) 474 data, err := os.ReadFile(fp) 475 if err != nil { 476 return nil, err 477 } 478 479 return data, err 480 } 481 482 // AddChunk adds a new chunk of a cached object 483 func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error { 484 _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm) 485 486 filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10)) 487 err := os.WriteFile(filePath, data, os.ModePerm) 488 if err != nil { 489 return err 490 } 491 492 return b.db.Update(func(tx *bolt.Tx) error { 493 tsBucket := tx.Bucket([]byte(DataTsBucket)) 494 ts := time.Now() 495 found := false 496 497 // delete (older) timestamps for the same object 498 c := tsBucket.Cursor() 499 for k, v := c.First(); k != nil; k, v = c.Next() { 500 var ci chunkInfo 501 err = json.Unmarshal(v, &ci) 502 if err != nil { 503 continue 504 } 505 if ci.Path == fp && ci.Offset == offset { 506 if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found { 507 found = true 508 continue 509 } 510 err := c.Delete() 511 if err != nil { 512 fs.Debugf(fp, "failed to clean chunk: %v", err) 513 } 514 } 515 } 516 // don't overwrite if a newer one is already there 517 if found { 518 return nil 519 } 520 enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))}) 521 if err != nil { 522 fs.Debugf(fp, "failed to timestamp chunk: %v", err) 523 } 524 err = tsBucket.Put(itob(ts.UnixNano()), enc) 525 if err != nil { 526 fs.Debugf(fp, "failed to timestamp chunk: %v", err) 527 } 528 return nil 529 }) 530 } 531 532 // CleanChunksByAge will cleanup on a cron basis 533 func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) { 534 // NOOP 535 } 536 537 // CleanChunksByNeed is a noop for this implementation 538 func (b *Persistent) CleanChunksByNeed(offset int64) { 539 // noop: we want to clean a Bolt DB by time only 540 } 541 542 // CleanChunksBySize will cleanup chunks after the total size passes a certain point 543 func (b *Persistent) CleanChunksBySize(maxSize int64) { 544 b.cleanupMux.Lock() 545 defer b.cleanupMux.Unlock() 546 var cntChunks int 547 var roughlyCleaned fs.SizeSuffix 548 549 err := b.db.Update(func(tx *bolt.Tx) error { 550 dataTsBucket := tx.Bucket([]byte(DataTsBucket)) 551 if dataTsBucket == nil { 552 return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket) 553 } 554 // iterate through ts 555 c := dataTsBucket.Cursor() 556 totalSize := int64(0) 557 for k, v := c.First(); k != nil; k, v = c.Next() { 558 var ci chunkInfo 559 err := json.Unmarshal(v, &ci) 560 if err != nil { 561 continue 562 } 563 564 totalSize += ci.Size 565 } 566 567 if totalSize > maxSize { 568 needToClean := totalSize - maxSize 569 roughlyCleaned = fs.SizeSuffix(needToClean) 570 for k, v := c.First(); k != nil; k, v = c.Next() { 571 var ci chunkInfo 572 err := json.Unmarshal(v, &ci) 573 if err != nil { 574 continue 575 } 576 // delete this ts entry 577 err = c.Delete() 578 if err != nil { 579 fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err) 580 continue 581 } 582 err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10))) 583 if err == nil { 584 cntChunks++ 585 needToClean -= ci.Size 586 if needToClean <= 0 { 587 break 588 } 589 } 590 } 591 } 592 if cntChunks > 0 { 593 fs.Infof("cache-cleanup", "chunks %v, est. size: %v", cntChunks, roughlyCleaned.String()) 594 595 } 596 return nil 597 }) 598 599 if err != nil { 600 if err == bolt.ErrDatabaseNotOpen { 601 // we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore 602 return 603 } 604 fs.Errorf("cache", "cleanup failed: %v", err) 605 } 606 } 607 608 // Stats returns a go map with the stats key values 609 func (b *Persistent) Stats() (map[string]map[string]interface{}, error) { 610 r := make(map[string]map[string]interface{}) 611 r["data"] = make(map[string]interface{}) 612 r["data"]["oldest-ts"] = time.Now() 613 r["data"]["oldest-file"] = "" 614 r["data"]["newest-ts"] = time.Now() 615 r["data"]["newest-file"] = "" 616 r["data"]["total-chunks"] = 0 617 r["data"]["total-size"] = int64(0) 618 r["files"] = make(map[string]interface{}) 619 r["files"]["oldest-ts"] = time.Now() 620 r["files"]["oldest-name"] = "" 621 r["files"]["newest-ts"] = time.Now() 622 r["files"]["newest-name"] = "" 623 r["files"]["total-files"] = 0 624 625 _ = b.db.View(func(tx *bolt.Tx) error { 626 dataTsBucket := tx.Bucket([]byte(DataTsBucket)) 627 rootTsBucket := tx.Bucket([]byte(RootTsBucket)) 628 629 var totalDirs int 630 var totalFiles int 631 _ = b.iterateBuckets(tx.Bucket([]byte(RootBucket)), func(name string) { 632 totalDirs++ 633 }, func(key string, val []byte) { 634 totalFiles++ 635 }) 636 r["files"]["total-dir"] = totalDirs 637 r["files"]["total-files"] = totalFiles 638 639 c := dataTsBucket.Cursor() 640 641 totalChunks := 0 642 totalSize := int64(0) 643 for k, v := c.First(); k != nil; k, v = c.Next() { 644 var ci chunkInfo 645 err := json.Unmarshal(v, &ci) 646 if err != nil { 647 continue 648 } 649 totalChunks++ 650 totalSize += ci.Size 651 } 652 r["data"]["total-chunks"] = totalChunks 653 r["data"]["total-size"] = totalSize 654 655 if k, v := c.First(); k != nil { 656 var ci chunkInfo 657 _ = json.Unmarshal(v, &ci) 658 r["data"]["oldest-ts"] = time.Unix(0, btoi(k)) 659 r["data"]["oldest-file"] = ci.Path 660 } 661 if k, v := c.Last(); k != nil { 662 var ci chunkInfo 663 _ = json.Unmarshal(v, &ci) 664 r["data"]["newest-ts"] = time.Unix(0, btoi(k)) 665 r["data"]["newest-file"] = ci.Path 666 } 667 668 c = rootTsBucket.Cursor() 669 if k, v := c.First(); k != nil { 670 // split to get (abs path - offset) 671 r["files"]["oldest-ts"] = time.Unix(0, btoi(k)) 672 r["files"]["oldest-name"] = string(v) 673 } 674 if k, v := c.Last(); k != nil { 675 r["files"]["newest-ts"] = time.Unix(0, btoi(k)) 676 r["files"]["newest-name"] = string(v) 677 } 678 679 return nil 680 }) 681 682 return r, nil 683 } 684 685 // Purge will flush the entire cache 686 func (b *Persistent) Purge() { 687 b.cleanupMux.Lock() 688 defer b.cleanupMux.Unlock() 689 690 _ = b.db.Update(func(tx *bolt.Tx) error { 691 _ = tx.DeleteBucket([]byte(RootBucket)) 692 _ = tx.DeleteBucket([]byte(RootTsBucket)) 693 _ = tx.DeleteBucket([]byte(DataTsBucket)) 694 695 _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) 696 _, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket)) 697 _, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket)) 698 699 return nil 700 }) 701 702 err := os.RemoveAll(b.dataPath) 703 if err != nil { 704 fs.Errorf(b, "issue removing data folder: %v", err) 705 } 706 err = os.MkdirAll(b.dataPath, os.ModePerm) 707 if err != nil { 708 fs.Errorf(b, "issue removing data folder: %v", err) 709 } 710 } 711 712 // GetChunkTs retrieves the current timestamp of this chunk 713 func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) { 714 var t time.Time 715 716 err := b.db.View(func(tx *bolt.Tx) error { 717 tsBucket := tx.Bucket([]byte(DataTsBucket)) 718 c := tsBucket.Cursor() 719 for k, v := c.First(); k != nil; k, v = c.Next() { 720 var ci chunkInfo 721 err := json.Unmarshal(v, &ci) 722 if err != nil { 723 continue 724 } 725 if ci.Path == path && ci.Offset == offset { 726 t = time.Unix(0, btoi(k)) 727 return nil 728 } 729 } 730 return fmt.Errorf("not found %v-%v", path, offset) 731 }) 732 733 return t, err 734 } 735 736 func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string), kvFn func(key string, val []byte)) error { 737 err := b.db.View(func(tx *bolt.Tx) error { 738 var c *bolt.Cursor 739 if buk == nil { 740 c = tx.Cursor() 741 } else { 742 c = buk.Cursor() 743 } 744 for k, v := c.First(); k != nil; k, v = c.Next() { 745 if v == nil { 746 var buk2 *bolt.Bucket 747 if buk == nil { 748 buk2 = tx.Bucket(k) 749 } else { 750 buk2 = buk.Bucket(k) 751 } 752 753 bucketFn(string(k)) 754 _ = b.iterateBuckets(buk2, bucketFn, kvFn) 755 } else { 756 kvFn(string(k), v) 757 } 758 } 759 return nil 760 }) 761 762 return err 763 } 764 765 // addPendingUpload adds a new file to the pending queue of uploads 766 func (b *Persistent) addPendingUpload(destPath string, started bool) error { 767 return b.db.Update(func(tx *bolt.Tx) error { 768 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 769 if err != nil { 770 return fmt.Errorf("couldn't bucket for %v", tempBucket) 771 } 772 tempObj := &tempUploadInfo{ 773 DestPath: destPath, 774 AddedOn: time.Now(), 775 Started: started, 776 } 777 778 // cache Object Info 779 encoded, err := json.Marshal(tempObj) 780 if err != nil { 781 return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err) 782 } 783 err = bucket.Put([]byte(destPath), encoded) 784 if err != nil { 785 return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err) 786 } 787 788 return nil 789 }) 790 } 791 792 // getPendingUpload returns the next file from the pending queue of uploads 793 func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (destPath string, err error) { 794 b.tempQueueMux.Lock() 795 defer b.tempQueueMux.Unlock() 796 797 err = b.db.Update(func(tx *bolt.Tx) error { 798 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 799 if err != nil { 800 return fmt.Errorf("couldn't bucket for %v", tempBucket) 801 } 802 803 c := bucket.Cursor() 804 for k, v := c.Seek([]byte(inRoot)); k != nil && bytes.HasPrefix(k, []byte(inRoot)); k, v = c.Next() { 805 //for k, v := c.First(); k != nil; k, v = c.Next() { 806 var tempObj = &tempUploadInfo{} 807 err = json.Unmarshal(v, tempObj) 808 if err != nil { 809 fs.Errorf(b, "failed to read pending upload: %v", err) 810 continue 811 } 812 // skip over started uploads 813 if tempObj.Started || time.Now().Before(tempObj.AddedOn.Add(waitTime)) { 814 continue 815 } 816 817 tempObj.Started = true 818 v2, err := json.Marshal(tempObj) 819 if err != nil { 820 fs.Errorf(b, "failed to update pending upload: %v", err) 821 continue 822 } 823 err = bucket.Put(k, v2) 824 if err != nil { 825 fs.Errorf(b, "failed to update pending upload: %v", err) 826 continue 827 } 828 829 destPath = tempObj.DestPath 830 return nil 831 } 832 833 return fmt.Errorf("no pending upload found") 834 }) 835 836 return destPath, err 837 } 838 839 // SearchPendingUpload returns the file info from the pending queue of uploads 840 func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error) { 841 err = b.db.View(func(tx *bolt.Tx) error { 842 bucket := tx.Bucket([]byte(tempBucket)) 843 if bucket == nil { 844 return fmt.Errorf("couldn't bucket for %v", tempBucket) 845 } 846 847 var tempObj = &tempUploadInfo{} 848 v := bucket.Get([]byte(remote)) 849 err = json.Unmarshal(v, tempObj) 850 if err != nil { 851 return fmt.Errorf("pending upload (%v) not found %v", remote, err) 852 } 853 854 started = tempObj.Started 855 return nil 856 }) 857 858 return started, err 859 } 860 861 // searchPendingUploadFromDir files currently pending upload from a single dir 862 func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, err error) { 863 err = b.db.View(func(tx *bolt.Tx) error { 864 bucket := tx.Bucket([]byte(tempBucket)) 865 if bucket == nil { 866 return fmt.Errorf("couldn't bucket for %v", tempBucket) 867 } 868 869 c := bucket.Cursor() 870 for k, v := c.First(); k != nil; k, v = c.Next() { 871 var tempObj = &tempUploadInfo{} 872 err = json.Unmarshal(v, tempObj) 873 if err != nil { 874 fs.Errorf(b, "failed to read pending upload: %v", err) 875 continue 876 } 877 parentDir := cleanPath(path.Dir(tempObj.DestPath)) 878 if dir == parentDir { 879 remotes = append(remotes, tempObj.DestPath) 880 } 881 } 882 883 return nil 884 }) 885 886 return remotes, err 887 } 888 889 func (b *Persistent) rollbackPendingUpload(remote string) error { 890 b.tempQueueMux.Lock() 891 defer b.tempQueueMux.Unlock() 892 893 return b.db.Update(func(tx *bolt.Tx) error { 894 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 895 if err != nil { 896 return fmt.Errorf("couldn't bucket for %v", tempBucket) 897 } 898 var tempObj = &tempUploadInfo{} 899 v := bucket.Get([]byte(remote)) 900 err = json.Unmarshal(v, tempObj) 901 if err != nil { 902 return fmt.Errorf("pending upload (%v) not found: %w", remote, err) 903 } 904 tempObj.Started = false 905 v2, err := json.Marshal(tempObj) 906 if err != nil { 907 return fmt.Errorf("pending upload not updated: %w", err) 908 } 909 err = bucket.Put([]byte(tempObj.DestPath), v2) 910 if err != nil { 911 return fmt.Errorf("pending upload not updated: %w", err) 912 } 913 return nil 914 }) 915 } 916 917 func (b *Persistent) removePendingUpload(remote string) error { 918 b.tempQueueMux.Lock() 919 defer b.tempQueueMux.Unlock() 920 921 return b.db.Update(func(tx *bolt.Tx) error { 922 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 923 if err != nil { 924 return fmt.Errorf("couldn't bucket for %v", tempBucket) 925 } 926 return bucket.Delete([]byte(remote)) 927 }) 928 } 929 930 // updatePendingUpload allows to update an existing item in the queue while checking if it's not started in the same 931 // transaction. If it is started, it will not allow the update 932 func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error { 933 b.tempQueueMux.Lock() 934 defer b.tempQueueMux.Unlock() 935 936 return b.db.Update(func(tx *bolt.Tx) error { 937 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 938 if err != nil { 939 return fmt.Errorf("couldn't bucket for %v", tempBucket) 940 } 941 942 var tempObj = &tempUploadInfo{} 943 v := bucket.Get([]byte(remote)) 944 err = json.Unmarshal(v, tempObj) 945 if err != nil { 946 return fmt.Errorf("pending upload (%v) not found %v", remote, err) 947 } 948 if tempObj.Started { 949 return fmt.Errorf("pending upload already started %v", remote) 950 } 951 err = fn(tempObj) 952 if err != nil { 953 return err 954 } 955 if remote != tempObj.DestPath { 956 err := bucket.Delete([]byte(remote)) 957 if err != nil { 958 return err 959 } 960 // if this is removed then the entry can be removed too 961 if tempObj.DestPath == "" { 962 return nil 963 } 964 } 965 v2, err := json.Marshal(tempObj) 966 if err != nil { 967 return fmt.Errorf("pending upload not updated: %w", err) 968 } 969 err = bucket.Put([]byte(tempObj.DestPath), v2) 970 if err != nil { 971 return fmt.Errorf("pending upload not updated: %w", err) 972 } 973 974 return nil 975 }) 976 } 977 978 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue 979 func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error { 980 return b.db.Update(func(tx *bolt.Tx) error { 981 _ = tx.DeleteBucket([]byte(tempBucket)) 982 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) 983 if err != nil { 984 return err 985 } 986 987 var queuedEntries []fs.Object 988 err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { 989 for _, o := range entries { 990 if oo, ok := o.(fs.Object); ok { 991 queuedEntries = append(queuedEntries, oo) 992 } 993 } 994 return nil 995 }) 996 if err != nil { 997 return err 998 } 999 1000 fs.Debugf(cacheFs, "reconciling temporary uploads") 1001 for _, queuedEntry := range queuedEntries { 1002 destPath := path.Join(cacheFs.Root(), queuedEntry.Remote()) 1003 tempObj := &tempUploadInfo{ 1004 DestPath: destPath, 1005 AddedOn: time.Now(), 1006 Started: false, 1007 } 1008 1009 // cache Object Info 1010 encoded, err := json.Marshal(tempObj) 1011 if err != nil { 1012 return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err) 1013 } 1014 err = bucket.Put([]byte(destPath), encoded) 1015 if err != nil { 1016 return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err) 1017 } 1018 fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath) 1019 } 1020 1021 return nil 1022 }) 1023 } 1024 1025 // Close should be called when the program ends gracefully 1026 func (b *Persistent) Close() { 1027 b.cleanupMux.Lock() 1028 defer b.cleanupMux.Unlock() 1029 1030 err := b.db.Close() 1031 if err != nil { 1032 fs.Errorf(b, "closing handle: %v", err) 1033 } 1034 b.open = false 1035 } 1036 1037 // itob returns an 8-byte big endian representation of v. 1038 func itob(v int64) []byte { 1039 b := make([]byte, 8) 1040 binary.BigEndian.PutUint64(b, uint64(v)) 1041 return b 1042 } 1043 1044 func btoi(d []byte) int64 { 1045 return int64(binary.BigEndian.Uint64(d)) 1046 }