github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/filefabric/filefabric.go (about) 1 // Package filefabric provides an interface to Storage Made Easy's 2 // Enterprise File Fabric storage system. 3 package filefabric 4 5 /* 6 Docs: https://product-demo.smestorage.com/?p=apidoc 7 8 Missing features: 9 - M-Stream support 10 - Oauth-like flow (soon being changed to oauth) 11 12 // TestFileFabric 13 maxFileLength = 14094 14 */ 15 16 import ( 17 "bytes" 18 "context" 19 "encoding/base64" 20 "errors" 21 "fmt" 22 "io" 23 "net/http" 24 "net/url" 25 "path" 26 "strings" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 "github.com/rclone/rclone/lib/atexit" 32 "github.com/rclone/rclone/lib/encoder" 33 "github.com/rclone/rclone/lib/random" 34 35 "github.com/rclone/rclone/backend/filefabric/api" 36 "github.com/rclone/rclone/fs" 37 "github.com/rclone/rclone/fs/config" 38 "github.com/rclone/rclone/fs/config/configmap" 39 "github.com/rclone/rclone/fs/config/configstruct" 40 "github.com/rclone/rclone/fs/fserrors" 41 "github.com/rclone/rclone/fs/fshttp" 42 "github.com/rclone/rclone/fs/hash" 43 "github.com/rclone/rclone/fs/log" 44 "github.com/rclone/rclone/lib/dircache" 45 "github.com/rclone/rclone/lib/pacer" 46 "github.com/rclone/rclone/lib/rest" 47 ) 48 49 const ( 50 minSleep = 20 * time.Millisecond 51 maxSleep = 10 * time.Second 52 decayConstant = 2 // bigger for slower decay, exponential 53 listChunks = 1000 // chunk size to read directory listings 54 tokenLifeTime = 55 * time.Minute // 1 hour minus a bit of leeway 55 defaultRootID = "" // default root ID 56 emptyMimeType = "application/vnd.rclone.empty.file" 57 ) 58 59 // Register with Fs 60 func init() { 61 fs.Register(&fs.RegInfo{ 62 Name: "filefabric", 63 Description: "Enterprise File Fabric", 64 NewFs: NewFs, 65 Options: []fs.Option{{ 66 Name: "url", 67 Help: "URL of the Enterprise File Fabric to connect to.", 68 Required: true, 69 Examples: []fs.OptionExample{{ 70 Value: "https://storagemadeeasy.com", 71 Help: "Storage Made Easy US", 72 }, { 73 Value: "https://eu.storagemadeeasy.com", 74 Help: "Storage Made Easy EU", 75 }, { 76 Value: "https://yourfabric.smestorage.com", 77 Help: "Connect to your Enterprise File Fabric", 78 }}, 79 }, { 80 Name: "root_folder_id", 81 Help: `ID of the root folder. 82 83 Leave blank normally. 84 85 Fill in to make rclone start with directory of a given ID. 86 `, 87 Sensitive: true, 88 }, { 89 Name: "permanent_token", 90 Help: `Permanent Authentication Token. 91 92 A Permanent Authentication Token can be created in the Enterprise File 93 Fabric, on the users Dashboard under Security, there is an entry 94 you'll see called "My Authentication Tokens". Click the Manage button 95 to create one. 96 97 These tokens are normally valid for several years. 98 99 For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens 100 `, 101 Sensitive: true, 102 }, { 103 Name: "token", 104 Help: `Session Token. 105 106 This is a session token which rclone caches in the config file. It is 107 usually valid for 1 hour. 108 109 Don't set this value - rclone will set it automatically. 110 `, 111 Advanced: true, 112 Sensitive: true, 113 }, { 114 Name: "token_expiry", 115 Help: `Token expiry time. 116 117 Don't set this value - rclone will set it automatically. 118 `, 119 Advanced: true, 120 }, { 121 Name: "version", 122 Help: `Version read from the file fabric. 123 124 Don't set this value - rclone will set it automatically. 125 `, 126 Advanced: true, 127 }, { 128 Name: config.ConfigEncoding, 129 Help: config.ConfigEncodingHelp, 130 Advanced: true, 131 Default: (encoder.Display | 132 encoder.EncodeInvalidUtf8), 133 }}, 134 }) 135 } 136 137 // Options defines the configuration for this backend 138 type Options struct { 139 URL string `config:"url"` 140 RootFolderID string `config:"root_folder_id"` 141 PermanentToken string `config:"permanent_token"` 142 Token string `config:"token"` 143 TokenExpiry string `config:"token_expiry"` 144 Version string `config:"version"` 145 Enc encoder.MultiEncoder `config:"encoding"` 146 } 147 148 // Fs represents a remote filefabric 149 type Fs struct { 150 name string // name of this remote 151 root string // the path we are working on 152 opt Options // parsed options 153 features *fs.Features // optional features 154 m configmap.Mapper // to save config 155 srv *rest.Client // the connection to the server 156 dirCache *dircache.DirCache // Map of directory path to directory id 157 pacer *fs.Pacer // pacer for API calls 158 tokenMu sync.Mutex // hold when reading the token 159 token string // current access token 160 tokenExpiry time.Time // time the current token expires 161 tokenExpired atomic.Int32 162 canCopyWithName bool // set if detected that can use fi_name in copy 163 precision time.Duration // precision reported 164 } 165 166 // Object describes a filefabric object 167 // 168 // Will definitely have info but maybe not meta 169 type Object struct { 170 fs *Fs // what this object is part of 171 remote string // The remote path 172 hasMetaData bool // whether info below has been set 173 size int64 // size of the object 174 modTime time.Time // modification time of the object 175 id string // ID of the object 176 contentType string // ContentType of object 177 } 178 179 // ------------------------------------------------------------ 180 181 // Name of the remote (as passed into NewFs) 182 func (f *Fs) Name() string { 183 return f.name 184 } 185 186 // Root of the remote (as passed into NewFs) 187 func (f *Fs) Root() string { 188 return f.root 189 } 190 191 // String converts this Fs to a string 192 func (f *Fs) String() string { 193 return fmt.Sprintf("filefabric root '%s'", f.root) 194 } 195 196 // Features returns the optional features of this Fs 197 func (f *Fs) Features() *fs.Features { 198 return f.features 199 } 200 201 // parsePath parses a filefabric 'url' 202 func parsePath(path string) (root string) { 203 root = strings.Trim(path, "/") 204 return 205 } 206 207 // retryErrorCodes is a slice of error codes that we will retry 208 var retryErrorCodes = []int{ 209 429, // Too Many Requests. 210 500, // Internal Server Error 211 502, // Bad Gateway 212 503, // Service Unavailable 213 504, // Gateway Timeout 214 509, // Bandwidth Limit Exceeded 215 } 216 217 // Retry any of these 218 var retryStatusCodes = []struct { 219 code string 220 sleep time.Duration 221 }{ 222 { 223 // Can not create folder now. We are not able to complete the 224 // requested operation with such name. We are processing 225 // delete in that folder. Please try again later or use 226 // another name. (error_background) 227 code: "error_background", 228 sleep: 1 * time.Second, 229 }, 230 } 231 232 // shouldRetry returns a boolean as to whether this resp and err 233 // deserve to be retried. It returns the err as a convenience 234 // try should be the number of the tries so far, counting up from 1 235 func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) { 236 if fserrors.ContextError(ctx, &err) { 237 return false, err 238 } 239 if err != nil { 240 return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err 241 } 242 if status != nil && !status.OK() { 243 err = status // return the error from the RPC 244 code := status.GetCode() 245 if code == "login_token_expired" { 246 f.tokenExpired.Add(1) 247 } else { 248 for _, retryCode := range retryStatusCodes { 249 if code == retryCode.code { 250 if retryCode.sleep > 0 { 251 // make this thread only sleep exponentially increasing extra time 252 sleepTime := retryCode.sleep << (try - 1) 253 fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code) 254 time.Sleep(sleepTime) 255 } 256 return true, err 257 } 258 } 259 } 260 } 261 return false, err 262 } 263 264 // readMetaDataForPath reads the metadata from the path 265 func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string) (info *api.Item, err error) { 266 var resp api.FileResponse 267 _, err = f.rpc(ctx, "checkPathExists", params{ 268 "path": f.opt.Enc.FromStandardPath(path), 269 "pid": rootID, 270 }, &resp, nil) 271 if err != nil { 272 return nil, fmt.Errorf("failed to check path exists: %w", err) 273 } 274 if resp.Exists != "y" { 275 return nil, fs.ErrorObjectNotFound 276 } 277 return &resp.Item, nil 278 279 /* 280 // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) 281 leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) 282 if err != nil { 283 if err == fs.ErrorDirNotFound { 284 return nil, fs.ErrorObjectNotFound 285 } 286 return nil, err 287 } 288 289 found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool { 290 if item.Name == leaf { 291 info = item 292 return true 293 } 294 return false 295 }) 296 if err != nil { 297 return nil, err 298 } 299 if !found { 300 return nil, fs.ErrorObjectNotFound 301 } 302 return info, nil 303 */ 304 } 305 306 // Get the appliance info so we can set Version 307 func (f *Fs) getApplianceInfo(ctx context.Context) error { 308 var applianceInfo api.ApplianceInfo 309 _, err := f.rpc(ctx, "getApplianceInfo", params{ 310 "token": "*", 311 }, &applianceInfo, nil) 312 if err != nil { 313 return fmt.Errorf("failed to read appliance version: %w", err) 314 } 315 f.opt.Version = applianceInfo.SoftwareVersionLabel 316 f.m.Set("version", f.opt.Version) 317 return nil 318 } 319 320 // Gets the token or gets a new one if necessary 321 func (f *Fs) getToken(ctx context.Context) (token string, err error) { 322 f.tokenMu.Lock() 323 var refreshed = false 324 defer func() { 325 if refreshed { 326 f.tokenExpired.Store(0) 327 } 328 f.tokenMu.Unlock() 329 }() 330 331 expired := f.tokenExpired.Load() != 0 332 if expired { 333 fs.Debugf(f, "Token invalid - refreshing") 334 } 335 if f.token == "" { 336 fs.Debugf(f, "Empty token - refreshing") 337 expired = true 338 } 339 now := time.Now() 340 if f.tokenExpiry.IsZero() || now.After(f.tokenExpiry) { 341 fs.Debugf(f, "Token expired - refreshing") 342 expired = true 343 } 344 if !expired { 345 return f.token, nil 346 } 347 348 var info api.GetTokenByAuthTokenResponse 349 _, err = f.rpc(ctx, "getTokenByAuthToken", params{ 350 "token": "*", 351 "authtoken": f.opt.PermanentToken, 352 }, &info, nil) 353 if err != nil { 354 return "", fmt.Errorf("failed to get session token: %w", err) 355 } 356 refreshed = true 357 now = now.Add(tokenLifeTime) 358 f.token = info.Token 359 f.tokenExpiry = now 360 f.m.Set("token", f.token) 361 f.m.Set("token_expiry", now.Format(time.RFC3339)) 362 363 // Read appliance info when we update the token 364 err = f.getApplianceInfo(ctx) 365 if err != nil { 366 return "", err 367 } 368 f.setCapabilities() 369 370 return f.token, nil 371 } 372 373 // params for rpc 374 type params map[string]interface{} 375 376 // rpc calls the rpc.php method of the SME file fabric 377 // 378 // This is an entry point to all the method calls. 379 // 380 // If result is nil then resp.Body will need closing 381 func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) { 382 defer log.Trace(f, "%s(%+v) options=%+v", function, p, options)("result=%+v, err=%v", &result, &err) 383 384 // Get the token from params if present otherwise call getToken 385 var token string 386 if tokenI, ok := p["token"]; !ok { 387 token, err = f.getToken(ctx) 388 if err != nil { 389 return resp, err 390 } 391 } else { 392 token = tokenI.(string) 393 } 394 var data = url.Values{ 395 "function": {function}, 396 "token": {token}, 397 "apiformat": {"json"}, 398 } 399 for k, v := range p { 400 data.Set(k, fmt.Sprint(v)) 401 } 402 opts := rest.Opts{ 403 Method: "POST", 404 Path: "/api/rpc.php", 405 ContentType: "application/x-www-form-urlencoded", 406 Options: options, 407 } 408 try := 0 409 err = f.pacer.Call(func() (bool, error) { 410 try++ 411 // Refresh the body each retry 412 opts.Body = strings.NewReader(data.Encode()) 413 resp, err = f.srv.CallJSON(ctx, &opts, nil, result) 414 return f.shouldRetry(ctx, resp, err, result, try) 415 }) 416 if err != nil { 417 return resp, err 418 } 419 return resp, nil 420 } 421 422 // NewFs constructs an Fs from the path, container:path 423 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { 424 // Parse config into Options struct 425 opt := new(Options) 426 err := configstruct.Set(m, opt) 427 if err != nil { 428 return nil, err 429 } 430 431 opt.URL = strings.TrimSuffix(opt.URL, "/") 432 if opt.URL == "" { 433 return nil, errors.New("url must be set") 434 } 435 436 root = parsePath(root) 437 438 client := fshttp.NewClient(ctx) 439 440 f := &Fs{ 441 name: name, 442 root: root, 443 opt: *opt, 444 m: m, 445 srv: rest.NewClient(client).SetRoot(opt.URL), 446 pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), 447 token: opt.Token, 448 } 449 f.features = (&fs.Features{ 450 CaseInsensitive: true, 451 CanHaveEmptyDirectories: true, 452 ReadMimeType: true, 453 WriteMimeType: true, 454 }).Fill(ctx, f) 455 if f.opt.Version == "" { 456 err = f.getApplianceInfo(ctx) 457 if err != nil { 458 return nil, err 459 } 460 } 461 f.setCapabilities() 462 463 if opt.TokenExpiry != "" { 464 tokenExpiry, err := time.Parse(time.RFC3339, opt.TokenExpiry) 465 if err != nil { 466 fs.Errorf(nil, "Failed to parse token_expiry option: %v", err) 467 } else { 468 f.tokenExpiry = tokenExpiry 469 } 470 } 471 472 if opt.RootFolderID == "" { 473 opt.RootFolderID = defaultRootID 474 } 475 476 f.dirCache = dircache.New(f.root, opt.RootFolderID, f) 477 478 // Find out whether the root is a file or a directory or doesn't exist 479 var errReturn error 480 if f.root != "" { 481 info, err := f.readMetaDataForPath(ctx, f.opt.RootFolderID, f.root) 482 if err == nil && info != nil { 483 if info.Type == api.ItemTypeFile { 484 // Root is a file 485 // Point the root to the parent directory 486 f.root, _ = dircache.SplitPath(root) 487 f.dirCache = dircache.New(f.root, opt.RootFolderID, f) 488 errReturn = fs.ErrorIsFile 489 // Cache the ID of the parent of the file as the root ID 490 f.dirCache.Put(f.root, info.PID) 491 } else if info.Type == api.ItemTypeFolder { 492 // Root is a dir - cache its ID 493 f.dirCache.Put(f.root, info.ID) 494 } 495 //} else { 496 // Root is not found so a directory 497 } 498 } 499 return f, errReturn 500 } 501 502 // set the capabilities of this version of software 503 func (f *Fs) setCapabilities() { 504 version := f.opt.Version 505 if version == "" { 506 version = "0000.00" 507 } 508 if version >= "2006.02" { 509 f.precision = time.Second 510 f.canCopyWithName = true 511 } else { 512 // times can be altered this much on renames 513 f.precision = 1 * time.Hour 514 f.canCopyWithName = false 515 } 516 } 517 518 // Return an Object from a path 519 // 520 // If it can't be found it returns the error fs.ErrorObjectNotFound. 521 func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { 522 o := &Object{ 523 fs: f, 524 remote: remote, 525 } 526 var err error 527 if info != nil { 528 // Set info 529 err = o.setMetaData(info) 530 } else { 531 err = o.readMetaData(ctx) // reads info and meta, returning an error 532 } 533 if err != nil { 534 return nil, err 535 } 536 return o, nil 537 } 538 539 // NewObject finds the Object at remote. If it can't be found 540 // it returns the error fs.ErrorObjectNotFound. 541 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { 542 return f.newObjectWithInfo(ctx, remote, nil) 543 } 544 545 // FindLeaf finds a directory of name leaf in the folder with ID pathID 546 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { 547 // Find the leaf in pathID 548 found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { 549 if strings.EqualFold(item.Name, leaf) { 550 pathIDOut = item.ID 551 return true 552 } 553 return false 554 }) 555 return pathIDOut, found, err 556 } 557 558 // CreateDir makes a directory with pathID as parent and name leaf 559 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { 560 //fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) 561 var info api.DoCreateNewFolderResponse 562 _, err = f.rpc(ctx, "doCreateNewFolder", params{ 563 "fi_pid": pathID, 564 "fi_name": f.opt.Enc.FromStandardName(leaf), 565 }, &info, nil) 566 if err != nil { 567 return "", fmt.Errorf("failed to create directory: %w", err) 568 } 569 // fmt.Printf("...Id %q\n", *info.Id) 570 return info.Item.ID, nil 571 } 572 573 // list the objects into the function supplied 574 // 575 // If directories is set it only sends directories 576 // User function to process a File item from listAll 577 // 578 // Should return true to finish processing 579 type listAllFn func(*api.Item) bool 580 581 // Lists the directory required calling the user function on each item found 582 // 583 // If the user fn ever returns true then it early exits with found = true 584 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { 585 var ( 586 p = params{ 587 "fi_pid": dirID, 588 "count": listChunks, 589 "subfolders": "y", 590 // Cut down the things that are returned 591 "options": "filelist|" + api.ItemFields, 592 } 593 n = 0 594 ) 595 OUTER: 596 for { 597 var info api.GetFolderContentsResponse 598 _, err = f.rpc(ctx, "getFolderContents", p, &info, nil) 599 if err != nil { 600 return false, fmt.Errorf("failed to list directory: %w", err) 601 } 602 for i := range info.Items { 603 item := &info.Items[i] 604 if item.Type == api.ItemTypeFolder { 605 if filesOnly { 606 continue 607 } 608 } else if item.Type == api.ItemTypeFile { 609 if directoriesOnly { 610 continue 611 } 612 } else { 613 fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) 614 continue 615 } 616 if item.Trash { 617 continue 618 } 619 item.Name = f.opt.Enc.ToStandardName(item.Name) 620 if fn(item) { 621 found = true 622 break OUTER 623 } 624 } 625 // if didn't get any items then exit 626 if len(info.Items) == 0 { 627 break 628 } 629 n += len(info.Items) 630 if n >= info.Total { 631 break 632 } 633 p["from"] = n 634 } 635 636 return found, nil 637 } 638 639 // List the objects and directories in dir into entries. The 640 // entries can be returned in any order but should be for a 641 // complete directory. 642 // 643 // dir should be "" to list the root, and should not have 644 // trailing slashes. 645 // 646 // This should return ErrDirNotFound if the directory isn't 647 // found. 648 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { 649 directoryID, err := f.dirCache.FindDir(ctx, dir, false) 650 if err != nil { 651 return nil, err 652 } 653 var iErr error 654 _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { 655 remote := path.Join(dir, info.Name) 656 if info.Type == api.ItemTypeFolder { 657 // cache the directory ID for later lookups 658 f.dirCache.Put(remote, info.ID) 659 d := fs.NewDir(remote, time.Time(info.Modified)).SetID(info.ID).SetItems(info.SubFolders) 660 entries = append(entries, d) 661 } else if info.Type == api.ItemTypeFile { 662 o, err := f.newObjectWithInfo(ctx, remote, info) 663 if err != nil { 664 iErr = err 665 return true 666 } 667 entries = append(entries, o) 668 } 669 return false 670 }) 671 if err != nil { 672 return nil, err 673 } 674 if iErr != nil { 675 return nil, iErr 676 } 677 return entries, nil 678 } 679 680 // Creates from the parameters passed in a half finished Object which 681 // must have setMetaData called on it 682 // 683 // Returns the object, leaf, directoryID and error. 684 // 685 // Used to create new objects 686 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { 687 // Create the directory for the object if it doesn't exist 688 leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) 689 if err != nil { 690 return 691 } 692 // Temporary Object under construction 693 o = &Object{ 694 fs: f, 695 remote: remote, 696 } 697 return o, leaf, directoryID, nil 698 } 699 700 // Put the object 701 // 702 // Copy the reader in to the new object which is returned. 703 // 704 // The new object may have been created if an error is returned 705 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 706 remote := src.Remote() 707 size := src.Size() 708 modTime := src.ModTime(ctx) 709 710 o, _, _, err := f.createObject(ctx, remote, modTime, size) 711 if err != nil { 712 return nil, err 713 } 714 return o, o.Update(ctx, in, src, options...) 715 } 716 717 // Mkdir creates the container if it doesn't exist 718 func (f *Fs) Mkdir(ctx context.Context, dir string) error { 719 _, err := f.dirCache.FindDir(ctx, dir, true) 720 return err 721 } 722 723 // deleteObject removes an object by ID 724 func (f *Fs) deleteObject(ctx context.Context, id string) (err error) { 725 var info api.DeleteResponse 726 _, err = f.rpc(ctx, "doDeleteFile", params{ 727 "fi_id": id, 728 "completedeletion": "n", 729 }, &info, nil) 730 if err != nil { 731 return fmt.Errorf("failed to delete file: %w", err) 732 } 733 return nil 734 } 735 736 // purgeCheck removes the root directory, if check is set then it 737 // refuses to do so if it has anything in 738 func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { 739 root := path.Join(f.root, dir) 740 if root == "" { 741 return errors.New("can't purge root directory") 742 } 743 dc := f.dirCache 744 rootID, err := dc.FindDir(ctx, dir, false) 745 if err != nil { 746 return err 747 } 748 749 if check { 750 found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool { 751 fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) 752 return true 753 }) 754 if err != nil { 755 return err 756 } 757 if found { 758 return fs.ErrorDirectoryNotEmpty 759 } 760 } 761 762 var info api.EmptyResponse 763 _, err = f.rpc(ctx, "doDeleteFolder", params{ 764 "fi_id": rootID, 765 }, &info, nil) 766 f.dirCache.FlushDir(dir) 767 if err != nil { 768 return fmt.Errorf("failed to remove directory: %w", err) 769 } 770 return nil 771 } 772 773 // Rmdir deletes the root folder 774 // 775 // Returns an error if it isn't empty 776 func (f *Fs) Rmdir(ctx context.Context, dir string) error { 777 //fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) 778 return f.purgeCheck(ctx, dir, true) 779 } 780 781 // Precision return the precision of this Fs 782 func (f *Fs) Precision() time.Duration { 783 return f.precision 784 } 785 786 // Copy src to this remote using server side copy operations. 787 // 788 // This is stored with the remote path given. 789 // 790 // It returns the destination Object and a possible error. 791 // 792 // Will only be called if src.Fs().Name() == f.Name() 793 // 794 // If it isn't possible then return fs.ErrorCantCopy 795 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 796 srcObj, ok := src.(*Object) 797 if !ok { 798 fs.Debugf(src, "Can't copy - not same remote type") 799 return nil, fs.ErrorCantCopy 800 } 801 err := srcObj.readMetaData(ctx) 802 if err != nil { 803 return nil, err 804 } 805 806 // Create temporary object 807 dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) 808 if err != nil { 809 return nil, err 810 } 811 812 if !f.canCopyWithName && leaf != path.Base(srcObj.remote) { 813 fs.Debugf(src, "Can't copy - can't change the name of files") 814 return nil, fs.ErrorCantCopy 815 } 816 817 // Copy the object 818 var info api.FileResponse 819 p := params{ 820 "fi_id": srcObj.id, 821 "fi_pid": directoryID, 822 "force": "y", 823 "options": "allownoextension", // without this the filefabric adds extensions to files without 824 } 825 if f.canCopyWithName { 826 p["fi_name"] = f.opt.Enc.FromStandardName(leaf) 827 } 828 _, err = f.rpc(ctx, "doCopyFile", p, &info, nil) 829 if err != nil { 830 return nil, fmt.Errorf("failed to copy file: %w", err) 831 } 832 err = dstObj.setMetaData(&info.Item) 833 if err != nil { 834 return nil, err 835 } 836 return dstObj, nil 837 } 838 839 // Purge deletes all the files and the container 840 // 841 // Optional interface: Only implement this if you have a way of 842 // deleting all the files quicker than just running Remove() on the 843 // result of List() 844 func (f *Fs) Purge(ctx context.Context, dir string) error { 845 return f.purgeCheck(ctx, dir, false) 846 } 847 848 // Wait for the background task to complete if necessary 849 func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) { 850 if taskID == "" || taskID == "0" { 851 // No task to wait for 852 return nil 853 } 854 start := time.Now() 855 sleepTime := time.Second 856 for { 857 var info api.TasksResponse 858 _, err = f.rpc(ctx, "getUserBackgroundTasks", params{ 859 "taskid": taskID, 860 }, &info, nil) 861 if err != nil { 862 return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err) 863 } 864 if len(info.Tasks) == 0 { 865 // task has finished 866 break 867 } 868 if len(info.Tasks) > 1 { 869 fs.Errorf(f, "Unexpected number of tasks returned %d", len(info.Tasks)) 870 } 871 task := info.Tasks[0] 872 if task.BtStatus == "c" { 873 // task completed 874 break 875 } 876 dt := time.Since(start) 877 fs.Debugf(f, "Waiting for task ID %s: %s: to completed for %v - waited %v already", task.BtID, task.BtTitle, sleepTime, dt) 878 time.Sleep(sleepTime) 879 } 880 return nil 881 } 882 883 // Rename the leaf of a file or directory in a directory 884 func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf string) (item *api.Item, err error) { 885 var info api.FileResponse 886 method := "doRenameFile" 887 if isDir { 888 method = "doRenameFolder" 889 } 890 _, err = f.rpc(ctx, method, params{ 891 "fi_id": id, 892 "fi_name": newLeaf, 893 }, &info, nil) 894 if err != nil { 895 return nil, fmt.Errorf("failed to rename leaf: %w", err) 896 } 897 err = f.waitForBackgroundTask(ctx, info.Status.TaskID) 898 if err != nil { 899 return nil, err 900 } 901 return &info.Item, nil 902 } 903 904 // move a file or folder 905 // 906 // This is complicated by the fact that there is an API to move files 907 // between directories and a separate one to rename them. We try to 908 // call the minimum number of API calls. 909 func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) { 910 newLeaf = f.opt.Enc.FromStandardName(newLeaf) 911 oldLeaf = f.opt.Enc.FromStandardName(oldLeaf) 912 doRenameLeaf := oldLeaf != newLeaf 913 doMove := oldDirectoryID != newDirectoryID 914 915 // Now rename the leaf to a temporary name if we are moving to 916 // another directory to make sure we don't overwrite something 917 // in the destination directory by accident 918 if doRenameLeaf && doMove { 919 tmpLeaf := newLeaf + "." + random.String(8) 920 item, err = f.renameLeaf(ctx, isDir, id, tmpLeaf) 921 if err != nil { 922 return nil, err 923 } 924 } 925 926 // Move the object to a new directory (with the existing name) 927 // if required 928 if doMove { 929 var info api.MoveFilesResponse 930 method := "doMoveFiles" 931 if isDir { 932 method = "doMoveFolders" 933 } 934 _, err = f.rpc(ctx, method, params{ 935 "fi_ids": id, 936 "dir_id": newDirectoryID, 937 }, &info, nil) 938 if err != nil { 939 return nil, fmt.Errorf("failed to move file to new directory: %w", err) 940 } 941 item = &info.Item 942 err = f.waitForBackgroundTask(ctx, info.Status.TaskID) 943 if err != nil { 944 return nil, err 945 } 946 } 947 948 // Rename the leaf to its final name if required 949 if doRenameLeaf { 950 item, err = f.renameLeaf(ctx, isDir, id, newLeaf) 951 if err != nil { 952 return nil, err 953 } 954 } 955 956 return item, nil 957 } 958 959 // Move src to this remote using server side move operations. 960 // 961 // This is stored with the remote path given. 962 // 963 // It returns the destination Object and a possible error. 964 // 965 // Will only be called if src.Fs().Name() == f.Name() 966 // 967 // If it isn't possible then return fs.ErrorCantMove 968 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 969 srcObj, ok := src.(*Object) 970 if !ok { 971 fs.Debugf(src, "Can't move - not same remote type") 972 return nil, fs.ErrorCantMove 973 } 974 975 // find the source directoryID 976 srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) 977 if err != nil { 978 return nil, err 979 } 980 981 // Create temporary object 982 dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) 983 if err != nil { 984 return nil, err 985 } 986 987 // Do the move 988 item, err := f.move(ctx, false, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) 989 if err != nil { 990 return nil, err 991 } 992 993 // Set the metadata from what was returned or read it fresh 994 if item == nil { 995 err = dstObj.readMetaData(ctx) 996 if err != nil { 997 return nil, err 998 } 999 } else { 1000 err = dstObj.setMetaData(item) 1001 if err != nil { 1002 return nil, err 1003 } 1004 } 1005 return dstObj, nil 1006 } 1007 1008 // DirMove moves src, srcRemote to this remote at dstRemote 1009 // using server side move operations. 1010 // 1011 // Will only be called if src.Fs().Name() == f.Name() 1012 // 1013 // If it isn't possible then return fs.ErrorCantDirMove 1014 // 1015 // If destination exists then return fs.ErrorDirExists 1016 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { 1017 srcFs, ok := src.(*Fs) 1018 if !ok { 1019 fs.Debugf(srcFs, "Can't move directory - not same remote type") 1020 return fs.ErrorCantDirMove 1021 } 1022 1023 srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) 1024 if err != nil { 1025 return err 1026 } 1027 1028 // Do the move 1029 _, err = f.move(ctx, true, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) 1030 if err != nil { 1031 return err 1032 } 1033 srcFs.dirCache.FlushDir(srcRemote) 1034 return nil 1035 } 1036 1037 // CleanUp empties the trash 1038 func (f *Fs) CleanUp(ctx context.Context) (err error) { 1039 var info api.EmptyResponse 1040 _, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil) 1041 if err != nil { 1042 return fmt.Errorf("failed to empty trash: %w", err) 1043 } 1044 return nil 1045 } 1046 1047 // DirCacheFlush resets the directory cache - used in testing as an 1048 // optional interface 1049 func (f *Fs) DirCacheFlush() { 1050 f.dirCache.ResetRoot() 1051 } 1052 1053 // Hashes returns the supported hash sets. 1054 func (f *Fs) Hashes() hash.Set { 1055 return hash.Set(hash.None) 1056 } 1057 1058 // ------------------------------------------------------------ 1059 1060 // Fs returns the parent Fs 1061 func (o *Object) Fs() fs.Info { 1062 return o.fs 1063 } 1064 1065 // Return a string version 1066 func (o *Object) String() string { 1067 if o == nil { 1068 return "<nil>" 1069 } 1070 return o.remote 1071 } 1072 1073 // Remote returns the remote path 1074 func (o *Object) Remote() string { 1075 return o.remote 1076 } 1077 1078 // Hash of the object in the requested format as a lowercase hex string 1079 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { 1080 return "", hash.ErrUnsupported 1081 } 1082 1083 // Size returns the size of an object in bytes 1084 func (o *Object) Size() int64 { 1085 err := o.readMetaData(context.TODO()) 1086 if err != nil { 1087 fs.Logf(o, "Failed to read metadata: %v", err) 1088 return 0 1089 } 1090 if o.contentType == emptyMimeType { 1091 return 0 1092 } 1093 return o.size 1094 } 1095 1096 // setMetaData sets the metadata from info 1097 func (o *Object) setMetaData(info *api.Item) (err error) { 1098 if info.Type != api.ItemTypeFile { 1099 return fs.ErrorIsDir 1100 } 1101 o.hasMetaData = true 1102 o.size = info.Size 1103 o.modTime = time.Time(info.Modified) 1104 if !time.Time(info.LocalTime).IsZero() { 1105 o.modTime = time.Time(info.LocalTime) 1106 } 1107 o.id = info.ID 1108 o.contentType = info.ContentType 1109 return nil 1110 } 1111 1112 // readMetaData gets the metadata if it hasn't already been fetched 1113 // 1114 // it also sets the info 1115 func (o *Object) readMetaData(ctx context.Context) (err error) { 1116 if o.hasMetaData { 1117 return nil 1118 } 1119 rootID, err := o.fs.dirCache.RootID(ctx, false) 1120 if err != nil { 1121 if err == fs.ErrorDirNotFound { 1122 err = fs.ErrorObjectNotFound 1123 } 1124 return err 1125 } 1126 info, err := o.fs.readMetaDataForPath(ctx, rootID, o.remote) 1127 if err != nil { 1128 if apiErr, ok := err.(*api.Status); ok { 1129 if apiErr.Code == "not_found" || apiErr.Code == "trashed" { 1130 return fs.ErrorObjectNotFound 1131 } 1132 } 1133 return err 1134 } 1135 return o.setMetaData(info) 1136 } 1137 1138 // ModTime returns the modification time of the object 1139 // 1140 // It attempts to read the objects mtime and if that isn't present the 1141 // LastModified returned in the http headers 1142 func (o *Object) ModTime(ctx context.Context) time.Time { 1143 err := o.readMetaData(ctx) 1144 if err != nil { 1145 fs.Logf(o, "Failed to read metadata: %v", err) 1146 return time.Now() 1147 } 1148 return o.modTime 1149 } 1150 1151 // modifyFile updates file metadata 1152 // 1153 // keyValues should be key, value pairs 1154 func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error { 1155 var info api.FileResponse 1156 var data strings.Builder 1157 for _, keyValue := range keyValues { 1158 data.WriteString(keyValue[0]) 1159 data.WriteRune('=') 1160 data.WriteString(keyValue[1]) 1161 data.WriteRune('\n') 1162 } 1163 _, err := o.fs.rpc(ctx, "doModifyFile", params{ 1164 "fi_id": o.id, 1165 "data": data.String(), 1166 }, &info, nil) 1167 if err != nil { 1168 return fmt.Errorf("failed to update metadata: %w", err) 1169 } 1170 return o.setMetaData(&info.Item) 1171 } 1172 1173 // SetModTime sets the modification time of the local fs object 1174 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { 1175 return o.modifyFile(ctx, [][2]string{ 1176 {"fi_localtime", api.Time(modTime).String()}, 1177 }) 1178 } 1179 1180 // Storable returns a boolean showing whether this object storable 1181 func (o *Object) Storable() bool { 1182 return true 1183 } 1184 1185 // Open an object for read 1186 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { 1187 if o.id == "" { 1188 return nil, errors.New("can't download - no id") 1189 } 1190 if o.contentType == emptyMimeType { 1191 return io.NopCloser(bytes.NewReader([]byte{})), nil 1192 } 1193 fs.FixRangeOption(options, o.size) 1194 resp, err := o.fs.rpc(ctx, "getFile", params{ 1195 "fi_id": o.id, 1196 }, nil, options) 1197 if err != nil { 1198 return nil, err 1199 } 1200 return resp.Body, nil 1201 } 1202 1203 // Update the object with the contents of the io.Reader, modTime and size 1204 // 1205 // If existing is set then it updates the object rather than creating a new one. 1206 // 1207 // The new object may have been created if an error is returned 1208 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { 1209 modTime := src.ModTime(ctx) 1210 remote := o.remote 1211 size := src.Size() 1212 1213 // Can't upload 0 length files - these upload as a single 1214 // space. 1215 // if size == 0 { 1216 // return fs.ErrorCantUploadEmptyFiles 1217 // } 1218 1219 // Create the directory for the object if it doesn't exist 1220 leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) 1221 if err != nil { 1222 return err 1223 } 1224 1225 // Initialise the upload 1226 var upload api.DoInitUploadResponse 1227 timestamp := api.Time(modTime).String() 1228 encodedLeaf := o.fs.opt.Enc.FromStandardName(leaf) 1229 base64EncodedLeaf := base64.StdEncoding.EncodeToString([]byte(encodedLeaf)) 1230 contentType := fs.MimeType(ctx, src) 1231 if size == 0 { 1232 contentType = emptyMimeType 1233 } 1234 p := params{ 1235 "fi_name": encodedLeaf, 1236 "fi_pid": directoryID, 1237 "fi_filename": encodedLeaf, 1238 "fi_localtime": timestamp, 1239 "fi_modified": timestamp, 1240 "fi_contenttype": contentType, 1241 "responsetype": "json", // make the upload.cgi return JSON 1242 "directuploadsupport": "n", // FIXME should we support this? 1243 // "chunkifbig": "n", // FIXME multipart? 1244 } 1245 // Set the size if known 1246 if size >= 0 { 1247 p["fi_size"] = size 1248 } 1249 _, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil) 1250 if err != nil { 1251 return fmt.Errorf("failed to initialize upload: %w", err) 1252 } 1253 1254 // Cancel the upload if aborted or it fails 1255 finalized := false 1256 defer atexit.OnError(&err, func() { 1257 if finalized { 1258 return 1259 } 1260 fs.Debugf(o, "Cancelling upload %s", upload.UploadCode) 1261 var cancel api.EmptyResponse 1262 _, fErr := o.fs.rpc(ctx, "doAbortUpload", params{ 1263 "uploadcode": upload.UploadCode, 1264 }, &cancel, nil) 1265 if fErr != nil { 1266 fs.Errorf(o, "failed to cancel upload: %v", fErr) 1267 } 1268 })() 1269 1270 // Post the file with the upload code 1271 var uploader api.UploaderResponse 1272 opts := rest.Opts{ 1273 //Method: "POST", 1274 Method: "PUT", 1275 Path: "/cgi-bin/uploader/uploader1.cgi/" + base64EncodedLeaf + "?" + upload.UploadCode, 1276 Body: in, 1277 ContentType: contentType, 1278 // MultipartParams: url.Values{}, 1279 // MultipartContentName: "file", 1280 // MultipartFileName: "datafile", 1281 } 1282 // Set the size if known 1283 if size >= 0 { 1284 var contentLength = size 1285 opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty 1286 } 1287 try := 0 1288 err = o.fs.pacer.CallNoRetry(func() (bool, error) { 1289 try++ 1290 resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader) 1291 return o.fs.shouldRetry(ctx, resp, err, nil, try) 1292 }) 1293 if err != nil { 1294 return fmt.Errorf("failed to upload: %w", err) 1295 } 1296 if uploader.Success != "y" { 1297 return fmt.Errorf("upload failed") 1298 } 1299 if size > 0 && uploader.FileSize != size { 1300 return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize) 1301 } 1302 1303 // Now finalize the file 1304 var finalize api.DoCompleteUploadResponse 1305 p = params{ 1306 "uploadcode": upload.UploadCode, 1307 "remotetime": timestamp, 1308 "fi_size": uploader.FileSize, 1309 } 1310 _, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil) 1311 if err != nil { 1312 return fmt.Errorf("failed to finalize upload: %w", err) 1313 } 1314 finalized = true 1315 1316 err = o.setMetaData(&finalize.File) 1317 if err != nil { 1318 return err 1319 } 1320 1321 // Make sure content type is correct 1322 if o.contentType != contentType { 1323 fs.Debugf(o, "Correcting mime type from %q to %q", o.contentType, contentType) 1324 return o.modifyFile(ctx, [][2]string{ 1325 {"fi_contenttype", contentType}, 1326 }) 1327 } 1328 1329 return nil 1330 } 1331 1332 // Remove an object 1333 func (o *Object) Remove(ctx context.Context) error { 1334 return o.fs.deleteObject(ctx, o.id) 1335 } 1336 1337 // ID returns the ID of the Object if known, or "" if not 1338 func (o *Object) ID() string { 1339 return o.id 1340 } 1341 1342 // MimeType returns the content type of the Object if 1343 // known, or "" if not 1344 func (o *Object) MimeType(ctx context.Context) string { 1345 return o.contentType 1346 } 1347 1348 // Check the interfaces are satisfied 1349 var ( 1350 _ fs.Fs = (*Fs)(nil) 1351 _ fs.Purger = (*Fs)(nil) 1352 _ fs.Copier = (*Fs)(nil) 1353 _ fs.Mover = (*Fs)(nil) 1354 _ fs.DirMover = (*Fs)(nil) 1355 _ fs.DirCacheFlusher = (*Fs)(nil) 1356 _ fs.CleanUpper = (*Fs)(nil) 1357 _ fs.Object = (*Object)(nil) 1358 _ fs.IDer = (*Object)(nil) 1359 _ fs.MimeTyper = (*Object)(nil) 1360 )