github.com/artpar/rclone@v1.67.3/backend/protondrive/protondrive.go (about) 1 // Package protondrive implements the Proton Drive backend 2 package protondrive 3 4 import ( 5 "context" 6 "errors" 7 "fmt" 8 "io" 9 "path" 10 "strings" 11 "time" 12 13 protonDriveAPI "github.com/henrybear327/Proton-API-Bridge" 14 "github.com/henrybear327/go-proton-api" 15 16 "github.com/artpar/rclone/fs" 17 "github.com/artpar/rclone/fs/config" 18 "github.com/artpar/rclone/fs/config/configmap" 19 "github.com/artpar/rclone/fs/config/configstruct" 20 "github.com/artpar/rclone/fs/config/obscure" 21 "github.com/artpar/rclone/fs/hash" 22 "github.com/artpar/rclone/lib/dircache" 23 "github.com/artpar/rclone/lib/encoder" 24 "github.com/artpar/rclone/lib/pacer" 25 "github.com/artpar/rclone/lib/readers" 26 ) 27 28 /* 29 - dirCache operates on relative path to root 30 - path sanitization 31 - rule of thumb: sanitize before use, but store things as-is 32 - the paths cached in dirCache are after sanitizing 33 - the remote/dir passed in aren't, and are stored as-is 34 */ 35 36 const ( 37 minSleep = 10 * time.Millisecond 38 maxSleep = 2 * time.Second 39 decayConstant = 2 // bigger for slower decay, exponential 40 41 clientUIDKey = "client_uid" 42 clientAccessTokenKey = "client_access_token" 43 clientRefreshTokenKey = "client_refresh_token" 44 clientSaltedKeyPassKey = "client_salted_key_pass" 45 ) 46 47 var ( 48 errCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size") 49 errCanNotPurgeRootDirectory = errors.New("can't purge root directory") 50 51 // for the auth/deauth handler 52 _mapper configmap.Mapper 53 _saltedKeyPass string 54 ) 55 56 // Register with Fs 57 func init() { 58 fs.Register(&fs.RegInfo{ 59 Name: "protondrive", 60 Description: "Proton Drive", 61 NewFs: NewFs, 62 Options: []fs.Option{{ 63 Name: "username", 64 Help: `The username of your proton account`, 65 Required: true, 66 }, { 67 Name: "password", 68 Help: "The password of your proton account.", 69 Required: true, 70 IsPassword: true, 71 }, { 72 Name: "mailbox_password", 73 Help: `The mailbox password of your two-password proton account. 
74 75 For more information regarding the mailbox password, please check the 76 following official knowledge base article: 77 https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password 78 `, 79 IsPassword: true, 80 Advanced: true, 81 }, { 82 Name: "2fa", 83 Help: `The 2FA code 84 85 The value can also be provided with --protondrive-2fa=000000 86 87 The 2FA code of your proton drive account if the account is set up with 88 two-factor authentication`, 89 Required: false, 90 }, { 91 Name: clientUIDKey, 92 Help: "Client uid key (internal use only)", 93 Required: false, 94 Advanced: true, 95 Sensitive: true, 96 Hide: fs.OptionHideBoth, 97 }, { 98 Name: clientAccessTokenKey, 99 Help: "Client access token key (internal use only)", 100 Required: false, 101 Advanced: true, 102 Sensitive: true, 103 Hide: fs.OptionHideBoth, 104 }, { 105 Name: clientRefreshTokenKey, 106 Help: "Client refresh token key (internal use only)", 107 Required: false, 108 Advanced: true, 109 Sensitive: true, 110 Hide: fs.OptionHideBoth, 111 }, { 112 Name: clientSaltedKeyPassKey, 113 Help: "Client salted key pass key (internal use only)", 114 Required: false, 115 Advanced: true, 116 Sensitive: true, 117 Hide: fs.OptionHideBoth, 118 }, { 119 Name: config.ConfigEncoding, 120 Help: config.ConfigEncodingHelp, 121 Advanced: true, 122 Default: (encoder.Base | 123 encoder.EncodeInvalidUtf8 | 124 encoder.EncodeLeftSpace | 125 encoder.EncodeRightSpace), 126 }, { 127 Name: "original_file_size", 128 Help: `Return the file size before encryption 129 130 The size of the encrypted file will be different from (bigger than) the 131 original file size. Unless there is a reason to return the file size 132 after encryption is performed, otherwise, set this option to true, as 133 features like Open() which will need to be supplied with original content 134 size, will fail to operate properly`, 135 Advanced: true, 136 Default: true, 137 }, { 138 Name: "app_version", 139 Help: `The app version string 140 141 The app version string indicates the client that is currently performing 142 the API request. This information is required and will be sent with every 143 API request.`, 144 Advanced: true, 145 Default: "macos-drive@1.0.0-alpha.1+rclone", 146 }, { 147 Name: "replace_existing_draft", 148 Help: `Create a new revision when filename conflict is detected 149 150 When a file upload is cancelled or failed before completion, a draft will be 151 created and the subsequent upload of the same file to the same location will be 152 reported as a conflict. 153 154 The value can also be set by --protondrive-replace-existing-draft=true 155 156 If the option is set to true, the draft will be replaced and then the upload 157 operation will restart. If there are other clients also uploading at the same 158 file location at the same time, the behavior is currently unknown. Need to set 159 to true for integration tests. 160 If the option is set to false, an error "a draft exist - usually this means a 161 file is being uploaded at another client, or, there was a failed upload attempt" 162 will be returned, and no upload will happen.`, 163 Advanced: true, 164 Default: false, 165 }, { 166 Name: "enable_caching", 167 Help: `Caches the files and folders metadata to reduce API calls 168 169 Notice: If you are mounting ProtonDrive as a VFS, please disable this feature, 170 as the current implementation doesn't update or clear the cache when there are 171 external changes. 
172 173 The files and folders on ProtonDrive are represented as links with keyrings, 174 which can be cached to improve performance and be friendly to the API server. 175 176 The cache is currently built for the case when the rclone is the only instance 177 performing operations to the mount point. The event system, which is the proton 178 API system that provides visibility of what has changed on the drive, is yet 179 to be implemented, so updates from other clients won’t be reflected in the 180 cache. Thus, if there are concurrent clients accessing the same mount point, 181 then we might have a problem with caching the stale data.`, 182 Advanced: true, 183 Default: true, 184 }}, 185 }) 186 } 187 188 // Options defines the configuration for this backend 189 type Options struct { 190 Username string `config:"username"` 191 Password string `config:"password"` 192 MailboxPassword string `config:"mailbox_password"` 193 TwoFA string `config:"2fa"` 194 195 // advanced 196 Enc encoder.MultiEncoder `config:"encoding"` 197 ReportOriginalSize bool `config:"original_file_size"` 198 AppVersion string `config:"app_version"` 199 ReplaceExistingDraft bool `config:"replace_existing_draft"` 200 EnableCaching bool `config:"enable_caching"` 201 } 202 203 // Fs represents a remote proton drive 204 type Fs struct { 205 name string // name of this remote 206 // Notice that for ProtonDrive, it's attached under rootLink (usually /root) 207 root string // the path we are working on. 208 opt Options // parsed config options 209 ci *fs.ConfigInfo // global config 210 features *fs.Features // optional features 211 pacer *fs.Pacer // pacer for API calls 212 dirCache *dircache.DirCache // Map of directory path to directory id 213 protonDrive *protonDriveAPI.ProtonDrive // the Proton API bridging library 214 } 215 216 // Object describes an object 217 type Object struct { 218 fs *Fs // what this object is part of 219 remote string // The remote path (relative to the fs.root) 220 size int64 // size of the object (on server, after encryption) 221 originalSize *int64 // size of the object (after decryption) 222 digests *string // object original content 223 blockSizes []int64 // the block sizes of the encrypted file 224 modTime time.Time // modification time of the object 225 createdTime time.Time // creation time of the object 226 id string // ID of the object 227 mimetype string // mimetype of the file 228 229 link *proton.Link // link data on proton server 230 } 231 232 // shouldRetry returns a boolean as to whether this err deserves to be 233 // retried. It returns the err as a convenience 234 func shouldRetry(ctx context.Context, err error) (bool, error) { 235 return false, err 236 } 237 238 //------------------------------------------------------------------------------ 239 240 // Name of the remote (as passed into NewFs) 241 func (f *Fs) Name() string { 242 return f.name 243 } 244 245 // Root of the remote (as passed into NewFs) 246 func (f *Fs) Root() string { 247 return f.opt.Enc.ToStandardPath(f.root) 248 } 249 250 // String converts this Fs to a string 251 func (f *Fs) String() string { 252 return fmt.Sprintf("proton drive root link ID '%s'", f.root) 253 } 254 255 // Features returns the optional features of this Fs 256 func (f *Fs) Features() *fs.Features { 257 return f.features 258 } 259 260 // run all the dir/remote through this 261 func (f *Fs) sanitizePath(_path string) string { 262 _path = path.Clean(_path) 263 if _path == "." 
|| _path == "/" { 264 return "" 265 } 266 267 return f.opt.Enc.FromStandardPath(_path) 268 } 269 270 func getConfigMap(m configmap.Mapper) (uid, accessToken, refreshToken, saltedKeyPass string, ok bool) { 271 if accessToken, ok = m.Get(clientAccessTokenKey); !ok { 272 return 273 } 274 275 if uid, ok = m.Get(clientUIDKey); !ok { 276 return 277 } 278 279 if refreshToken, ok = m.Get(clientRefreshTokenKey); !ok { 280 return 281 } 282 283 if saltedKeyPass, ok = m.Get(clientSaltedKeyPassKey); !ok { 284 return 285 } 286 _saltedKeyPass = saltedKeyPass 287 288 return 289 } 290 291 func setConfigMap(m configmap.Mapper, uid, accessToken, refreshToken, saltedKeyPass string) { 292 m.Set(clientUIDKey, uid) 293 m.Set(clientAccessTokenKey, accessToken) 294 m.Set(clientRefreshTokenKey, refreshToken) 295 m.Set(clientSaltedKeyPassKey, saltedKeyPass) 296 _saltedKeyPass = saltedKeyPass 297 } 298 299 func clearConfigMap(m configmap.Mapper) { 300 setConfigMap(m, "", "", "", "") 301 _saltedKeyPass = "" 302 } 303 304 func authHandler(auth proton.Auth) { 305 // fs.Debugf("authHandler called") 306 setConfigMap(_mapper, auth.UID, auth.AccessToken, auth.RefreshToken, _saltedKeyPass) 307 } 308 309 func deAuthHandler() { 310 // fs.Debugf("deAuthHandler called") 311 clearConfigMap(_mapper) 312 } 313 314 func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper) (*protonDriveAPI.ProtonDrive, error) { 315 config := protonDriveAPI.NewDefaultConfig() 316 config.AppVersion = opt.AppVersion 317 config.UserAgent = f.ci.UserAgent // opt.UserAgent 318 319 config.ReplaceExistingDraft = opt.ReplaceExistingDraft 320 config.EnableCaching = opt.EnableCaching 321 322 // let's see if we have the cached access credential 323 uid, accessToken, refreshToken, saltedKeyPass, hasUseReusableLoginCredentials := getConfigMap(m) 324 _saltedKeyPass = saltedKeyPass 325 326 if hasUseReusableLoginCredentials { 327 fs.Debugf(f, "Has cached credentials") 328 config.UseReusableLogin = true 329 330 config.ReusableCredential.UID = uid 331 config.ReusableCredential.AccessToken = accessToken 332 config.ReusableCredential.RefreshToken = refreshToken 333 config.ReusableCredential.SaltedKeyPass = saltedKeyPass 334 335 protonDrive /* credential will be nil since access credentials are passed in */, _, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler) 336 if err != nil { 337 fs.Debugf(f, "Cached credential doesn't work, clearing and using the fallback login method") 338 // clear the access token on failure 339 clearConfigMap(m) 340 341 fs.Debugf(f, "couldn't initialize a new proton drive instance using cached credentials: %v", err) 342 // we fallback to username+password login -> don't throw an error here 343 // return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err) 344 } else { 345 fs.Debugf(f, "Used cached credential to initialize the ProtonDrive API") 346 return protonDrive, nil 347 } 348 } 349 350 // if not, let's try to log the user in using username and password (and 2FA if required) 351 fs.Debugf(f, "Using username and password to log in") 352 config.UseReusableLogin = false 353 config.FirstLoginCredential.Username = opt.Username 354 config.FirstLoginCredential.Password = opt.Password 355 config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword 356 config.FirstLoginCredential.TwoFA = opt.TwoFA 357 protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler) 358 if err != nil { 359 return nil, fmt.Errorf("couldn't initialize a new 
proton drive instance: %w", err) 360 } 361 362 fs.Debugf(f, "Used username and password to initialize the ProtonDrive API") 363 setConfigMap(m, auth.UID, auth.AccessToken, auth.RefreshToken, auth.SaltedKeyPass) 364 365 return protonDrive, nil 366 } 367 368 // NewFs constructs an Fs from the path, container:path 369 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { 370 // pacer is not used in NewFs() 371 _mapper = m 372 373 // Parse config into Options struct 374 opt := new(Options) 375 err := configstruct.Set(m, opt) 376 if err != nil { 377 return nil, err 378 } 379 if opt.Password != "" { 380 var err error 381 opt.Password, err = obscure.Reveal(opt.Password) 382 if err != nil { 383 return nil, fmt.Errorf("couldn't decrypt password: %w", err) 384 } 385 } 386 387 if opt.MailboxPassword != "" { 388 var err error 389 opt.MailboxPassword, err = obscure.Reveal(opt.MailboxPassword) 390 if err != nil { 391 return nil, fmt.Errorf("couldn't decrypt mailbox password: %w", err) 392 } 393 } 394 395 ci := fs.GetConfig(ctx) 396 397 root = strings.Trim(root, "/") 398 399 f := &Fs{ 400 name: name, 401 root: root, 402 opt: *opt, 403 ci: ci, 404 pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), 405 } 406 407 f.features = (&fs.Features{ 408 ReadMimeType: true, 409 CanHaveEmptyDirectories: true, 410 /* can't have multiple threads downloading 411 The raw file is split into equally sized blocks (currently 4MB, but this might change in the future, say to 8MB or 16MB), except for the last one, which might be smaller than 4MB. 412 Each block is encrypted separately; the size and sha1 of the encrypted block are added to the block's metadata, but the original (pre-encryption) block size and sha1 are not. 413 We could make assumptions and implement a chunker, but for now we would rather be safe about it and let the blocks be downloaded and decrypted concurrently in the background to speed up the download operation!
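As a rough illustration (the numbers here are only an example, assuming the current 4MB block size): a 10MB file is stored as three encrypted blocks holding 4MB, 4MB and 2MB of plaintext, yet only the post-encryption size and sha1 of each block appear in the metadata, so the plaintext offsets needed to hand out ranges to multiple download threads can't be derived reliably.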
414 */ 415 NoMultiThreading: true, 416 }).Fill(ctx, f) 417 418 protonDrive, err := newProtonDrive(ctx, f, opt, m) 419 if err != nil { 420 return nil, err 421 } 422 f.protonDrive = protonDrive 423 424 root = f.sanitizePath(root) 425 f.dirCache = dircache.New( 426 root, /* root folder path */ 427 protonDrive.MainShare.LinkID, /* real root ID is the root folder, since we can't go past this folder */ 428 f, 429 ) 430 err = f.dirCache.FindRoot(ctx, false) 431 if err != nil { 432 // if the root directory is not found, the initialization will still work 433 // but if it's other kinds of error, then we raise it 434 if err != fs.ErrorDirNotFound { 435 return nil, fmt.Errorf("couldn't initialize a new root remote: %w", err) 436 } 437 438 // Assume it is a file (taken and modified from box.go) 439 newRoot, remote := dircache.SplitPath(root) 440 tempF := *f 441 tempF.dirCache = dircache.New(newRoot, protonDrive.MainShare.LinkID, &tempF) 442 tempF.root = newRoot 443 // Make new Fs which is the parent 444 err = tempF.dirCache.FindRoot(ctx, false) 445 if err != nil { 446 // No root so return old f 447 return f, nil 448 } 449 _, err := tempF.newObjectWithLink(ctx, remote, nil) 450 if err != nil { 451 if err == fs.ErrorObjectNotFound { 452 // File doesn't exist so return old f 453 return f, nil 454 } 455 return nil, err 456 } 457 f.features.Fill(ctx, &tempF) 458 // XXX: update the old f here instead of returning tempF, since 459 // `features` were already filled with functions having *f as a receiver. 460 // See https://github.com/artpar/rclone/issues/2182 461 f.dirCache = tempF.dirCache 462 f.root = tempF.root 463 // return an error with an fs which points to the parent 464 return f, fs.ErrorIsFile 465 } 466 467 return f, nil 468 } 469 470 //------------------------------------------------------------------------------ 471 472 // CleanUp deletes all files currently in trash 473 func (f *Fs) CleanUp(ctx context.Context) error { 474 return f.pacer.Call(func() (bool, error) { 475 err := f.protonDrive.EmptyTrash(ctx) 476 return shouldRetry(ctx, err) 477 }) 478 } 479 480 // NewObject finds the Object at remote. If it can't be found 481 // it returns the error ErrorObjectNotFound. 482 // 483 // If remote points to a directory then it should return 484 // ErrorIsDir if possible without doing any extra work, 485 // otherwise ErrorObjectNotFound. 
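//
// A brief caller-side sketch (illustrative only, not part of this backend; the
// path is made up):
//
//	obj, err := f.NewObject(ctx, "docs/report.pdf")
//	if errors.Is(err, fs.ErrorObjectNotFound) {
//		// the file (or its parent folder) doesn't exist on Proton Drive
//	} else if err == nil {
//		fs.Debugf(obj, "found object of %d bytes", obj.Size())
//	}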
486 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { 487 return f.newObjectWithLink(ctx, remote, nil) 488 } 489 490 func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) { 491 // attempt to locate the file 492 leaf, folderLinkID, err := f.dirCache.FindPath(ctx, f.sanitizePath(remote), false) 493 if err != nil { 494 if err == fs.ErrorDirNotFound { 495 // the parent folder of the file wasn't found, so the file itself can't be found either 496 return nil, fs.ErrorObjectNotFound 497 } 498 // some other error occurred 499 return nil, err 500 } 501 502 var link *proton.Link 503 if err = f.pacer.Call(func() (bool, error) { 504 link, err = f.protonDrive.SearchByNameInActiveFolderByID(ctx, folderLinkID, leaf, true, false, proton.LinkStateActive) 505 return shouldRetry(ctx, err) 506 }); err != nil { 507 return nil, err 508 } 509 if link == nil { // both link and err are nil, so the file was not found 510 return nil, fs.ErrorObjectNotFound 511 } 512 513 return link, nil 514 } 515 516 // readMetaDataForRemote reads the metadata from the remote 517 func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) { 518 link, err := f.getObjectLink(ctx, remote) 519 if err != nil { 520 return nil, nil, err 521 } 522 523 var fileSystemAttrs *protonDriveAPI.FileSystemAttrs 524 if err = f.pacer.Call(func() (bool, error) { 525 fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link) 526 return shouldRetry(ctx, err) 527 }); err != nil { 528 return nil, nil, err 529 } 530 531 return link, fileSystemAttrs, nil 532 } 533 534 // readMetaData gets the metadata if it hasn't already been fetched 535 // 536 // it also sets the info 537 func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) { 538 if o.link != nil { 539 return nil 540 } 541 542 link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link) 543 if err != nil { 544 return err 545 } 546 547 o.id = link.LinkID 548 o.size = link.Size 549 o.modTime = time.Unix(link.ModifyTime, 0) 550 o.createdTime = time.Unix(link.CreateTime, 0) 551 o.mimetype = link.MIMEType 552 o.link = link 553 554 if fileSystemAttrs != nil { 555 o.modTime = fileSystemAttrs.ModificationTime 556 o.originalSize = &fileSystemAttrs.Size 557 o.blockSizes = fileSystemAttrs.BlockSizes 558 o.digests = &fileSystemAttrs.Digests 559 } 560 561 return nil 562 } 563 564 // Return an Object from a path 565 // 566 // If it can't be found it returns the error fs.ErrorObjectNotFound. 567 func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) { 568 o := &Object{ 569 fs: f, 570 remote: remote, 571 } 572 573 err := o.readMetaData(ctx, link) 574 if err != nil { 575 return nil, err 576 } 577 return o, nil 578 } 579 580 // List the objects and directories in dir into entries. The 581 // entries can be returned in any order but should be for a 582 // complete directory. 583 // 584 // dir should be "" to list the root, and should not have 585 // trailing slashes. 586 // 587 // This should return ErrDirNotFound if the directory isn't 588 // found. 589 // Note that this function is expensive since everything on Proton Drive is encrypted, 590 // so listing a remote with 10k files during operations like sync might take a while and use a lot of bandwidth!
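//
// A minimal sketch (illustrative only; the directory name is made up) of how a
// caller might consume the returned entries:
//
//	entries, err := f.List(ctx, "photos/2023")
//	if err != nil {
//		return err
//	}
//	for _, entry := range entries {
//		switch e := entry.(type) {
//		case fs.Directory:
//			fs.Debugf(f, "dir  %s", e.Remote())
//		case fs.Object:
//			fs.Debugf(f, "file %s (%d bytes)", e.Remote(), e.Size())
//		}
//	}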
591 func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { 592 folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) // will handle ErrDirNotFound here 593 if err != nil { 594 return nil, err 595 } 596 597 var foldersAndFiles []*protonDriveAPI.ProtonDirectoryData 598 if err = f.pacer.Call(func() (bool, error) { 599 foldersAndFiles, err = f.protonDrive.ListDirectory(ctx, folderLinkID) 600 return shouldRetry(ctx, err) 601 }); err != nil { 602 return nil, err 603 } 604 605 entries := make(fs.DirEntries, 0) 606 for i := range foldersAndFiles { 607 remote := path.Join(dir, f.opt.Enc.ToStandardName(foldersAndFiles[i].Name)) 608 609 if foldersAndFiles[i].IsFolder { 610 f.dirCache.Put(remote, foldersAndFiles[i].Link.LinkID) 611 d := fs.NewDir(remote, time.Unix(foldersAndFiles[i].Link.ModifyTime, 0)).SetID(foldersAndFiles[i].Link.LinkID) 612 entries = append(entries, d) 613 } else { 614 obj, err := f.newObjectWithLink(ctx, remote, foldersAndFiles[i].Link) 615 if err != nil { 616 return nil, err 617 } 618 entries = append(entries, obj) 619 } 620 } 621 622 return entries, nil 623 } 624 625 // FindLeaf finds a directory of name leaf in the folder with ID pathID 626 // 627 // This should be implemented by the backend and will be called by the 628 // dircache package when appropriate. 629 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) { 630 /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ 631 632 var link *proton.Link 633 var err error 634 if err = f.pacer.Call(func() (bool, error) { 635 link, err = f.protonDrive.SearchByNameInActiveFolderByID(ctx, pathID, leaf, false, true, proton.LinkStateActive) 636 return shouldRetry(ctx, err) 637 }); err != nil { 638 return "", false, err 639 } 640 if link == nil { 641 return "", false, nil 642 } 643 644 return link.LinkID, true, nil 645 } 646 647 // CreateDir makes a directory with pathID as parent and name leaf 648 // 649 // This should be implemented by the backend and will be called by the 650 // dircache package when appropriate. 651 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) { 652 /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ 653 654 var newID string 655 var err error 656 if err = f.pacer.Call(func() (bool, error) { 657 newID, err = f.protonDrive.CreateNewFolderByID(ctx, pathID, leaf) 658 return shouldRetry(ctx, err) 659 }); err != nil { 660 return "", err 661 } 662 663 return newID, err 664 } 665 666 // Put in to the remote path with the modTime given of the given size 667 // 668 // When called from outside an Fs by rclone, src.Size() will always be >= 0. 669 // But for unknown-sized objects (indicated by src.Size() == -1), Put should either 670 // return an error or upload it properly (rather than e.g. calling panic). 671 // 672 // May create the object even if it returns an error - if so 673 // will return the object and the error, otherwise will return 674 // nil and the error 675 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 676 size := src.Size() 677 if size < 0 { 678 return nil, errCanNotUploadFileWithUnknownSize 679 } 680 681 existingObj, err := f.NewObject(ctx, src.Remote()) 682 switch err { 683 case nil: 684 // object is found, we add an revision to it 685 return existingObj, existingObj.Update(ctx, in, src, options...) 
686 case fs.ErrorObjectNotFound: 687 // object not found, so we need to create it 688 remote := src.Remote() 689 size := src.Size() 690 modTime := src.ModTime(ctx) 691 692 obj, err := f.createObject(ctx, remote, modTime, size) 693 if err != nil { 694 return nil, err 695 } 696 return obj, obj.Update(ctx, in, src, options...) 697 default: 698 // real error caught 699 return nil, err 700 } 701 } 702 703 // Creates from the parameters passed in a half finished Object which 704 // must have setMetaData called on it 705 // 706 // Returns the object, leaf, directoryID and error. 707 // 708 // Used to create new objects 709 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (*Object, error) { 710 // ˇ-------ˇ filename 711 // e.g. /root/a/b/c/test.txt 712 // ^~~~~~~~~~~^ dirPath 713 714 // Create the directory for the object if it doesn't exist 715 _, _, err := f.dirCache.FindPath(ctx, f.sanitizePath(remote), true) 716 if err != nil { 717 return nil, err 718 } 719 720 // Temporary Object under construction 721 obj := &Object{ 722 fs: f, 723 remote: remote, 724 size: size, 725 originalSize: nil, 726 id: "", 727 modTime: modTime, 728 mimetype: "", 729 link: nil, 730 } 731 return obj, nil 732 } 733 734 // Mkdir makes the directory (container, bucket) 735 // 736 // Shouldn't return an error if it already exists 737 func (f *Fs) Mkdir(ctx context.Context, dir string) error { 738 _, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), true) 739 return err 740 } 741 742 // Rmdir removes the directory (container, bucket) if empty 743 // 744 // Return an error if it doesn't exist or isn't empty 745 func (f *Fs) Rmdir(ctx context.Context, dir string) error { 746 folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) 747 if err == fs.ErrorDirNotFound { 748 return fmt.Errorf("[Rmdir] cannot find LinkID for dir %s (%s)", dir, f.sanitizePath(dir)) 749 } else if err != nil { 750 return err 751 } 752 753 if err = f.pacer.Call(func() (bool, error) { 754 err = f.protonDrive.MoveFolderToTrashByID(ctx, folderLinkID, true) 755 return shouldRetry(ctx, err) 756 }); err != nil { 757 return err 758 } 759 760 f.dirCache.FlushDir(f.sanitizePath(dir)) 761 return nil 762 } 763 764 // Precision of the ModTimes in this Fs 765 func (f *Fs) Precision() time.Duration { 766 return time.Second 767 } 768 769 // DirCacheFlush an optional interface to flush internal directory cache 770 // DirCacheFlush resets the directory cache - used in testing 771 // as an optional interface 772 func (f *Fs) DirCacheFlush() { 773 f.dirCache.ResetRoot() 774 f.protonDrive.ClearCache() 775 } 776 777 // Hashes returns the supported hash types of the filesystem 778 func (f *Fs) Hashes() hash.Set { 779 return hash.Set(hash.SHA1) 780 } 781 782 // About gets quota information 783 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { 784 var user *proton.User 785 var err error 786 if err = f.pacer.Call(func() (bool, error) { 787 user, err = f.protonDrive.About(ctx) 788 return shouldRetry(ctx, err) 789 }); err != nil { 790 return nil, err 791 } 792 793 total := user.MaxSpace 794 used := user.UsedSpace 795 free := total - used 796 797 usage := &fs.Usage{ 798 Total: &total, 799 Used: &used, 800 Free: &free, 801 } 802 803 return usage, nil 804 } 805 806 // ------------------------------------------------------------ 807 808 // Fs returns the parent Fs 809 func (o *Object) Fs() fs.Info { 810 return o.fs 811 } 812 813 // Return a string version 814 func (o *Object) String() string { 815 if o == nil { 
816 return "<nil>" 817 } 818 return o.remote 819 } 820 821 // Remote returns the remote path 822 func (o *Object) Remote() string { 823 return o.remote 824 } 825 826 // Hash returns the hashes of an object 827 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { 828 if t != hash.SHA1 { 829 return "", hash.ErrUnsupported 830 } 831 832 if o.digests != nil { 833 return *o.digests, nil 834 } 835 836 // sha1 not cached: fetch the active revision attributes and try to obtain the sha1 of the link 837 fileSystemAttrs, err := o.fs.protonDrive.GetActiveRevisionAttrsByID(ctx, o.ID()) 838 if err != nil { 839 return "", err 840 } 841 842 if fileSystemAttrs == nil || fileSystemAttrs.Digests == "" { 843 fs.Debugf(o, "file sha1 digest missing") 844 return "", nil 845 } 846 return fileSystemAttrs.Digests, nil 847 } 848 849 // Size returns the size of an object in bytes 850 func (o *Object) Size() int64 { 851 if o.fs.opt.ReportOriginalSize { 852 // if ReportOriginalSize is set, we return the original (decrypted) size when it is known and log a debug message when it couldn't be parsed 853 // this is crucial, as features like Open() need the original size for the seek/range operators to work properly 854 if o.originalSize != nil { 855 return *o.originalSize 856 } 857 858 fs.Debugf(o, "Original file size missing") 859 } 860 return o.size 861 } 862 863 // ModTime returns the modification time of the object 864 // 865 // It attempts to read the object's mtime and if that isn't present the 866 // LastModified returned in the http headers 867 func (o *Object) ModTime(ctx context.Context) time.Time { 868 return o.modTime 869 } 870 871 // SetModTime sets the modification time of the local fs object 872 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { 873 return fs.ErrorCantSetModTime 874 } 875 876 // Storable returns a boolean showing whether this object is storable 877 func (o *Object) Storable() bool { 878 return true 879 } 880 881 // Open opens the file for read. Call Close() on the returned io.ReadCloser 882 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { 883 fs.FixRangeOption(options, *o.originalSize) 884 var offset, limit int64 = 0, -1 885 for _, option := range options { // if the caller passes in nil for options, it becomes a slice with a single nil element 886 switch x := option.(type) { 887 case *fs.SeekOption: 888 offset = x.Offset 889 case *fs.RangeOption: 890 offset, limit = x.Decode(o.Size()) 891 default: 892 if option.Mandatory() { 893 fs.Logf(o, "Unsupported mandatory option: %v", option) 894 } 895 } 896 } 897 898 // download and decrypt the file 899 var reader io.ReadCloser 900 var fileSystemAttrs *protonDriveAPI.FileSystemAttrs 901 var sizeOnServer int64 902 var err error 903 if err = o.fs.pacer.Call(func() (bool, error) { 904 reader, sizeOnServer, fileSystemAttrs, err = o.fs.protonDrive.DownloadFileByID(ctx, o.id, offset) 905 return shouldRetry(ctx, err) 906 }); err != nil { 907 return nil, err 908 } 909 910 if fileSystemAttrs != nil { 911 o.originalSize = &fileSystemAttrs.Size 912 o.modTime = fileSystemAttrs.ModificationTime 913 o.digests = &fileSystemAttrs.Digests 914 o.blockSizes = fileSystemAttrs.BlockSizes 915 } else { 916 fs.Debugf(o, "fileSystemAttrs is nil: using fallback size, no digests and block sizes available") 917 o.originalSize = &sizeOnServer 918 o.size = sizeOnServer 919 o.digests = nil 920 o.blockSizes = nil 921 } 922 923 retReader := io.NopCloser(reader) // the NewLimitedReadCloser will deal with the limit 924 925 // deal with limit 926 return readers.NewLimitedReadCloser(retReader, limit), nil 927 } 928 929 // Update in to the object with the modTime given of the given size 930 // 931 // When called from outside an Fs by rclone, src.Size() will always be >= 0. 932 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either 933 // return an error or update the object properly (rather than e.g. calling panic).
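//
// A hedged sketch of how Put above funnels into Update during an upload; it is
// not compiled here (the fs/object and bytes packages are not imported by this
// file) and the file name is made up:
//
//	src := object.NewStaticObjectInfo("notes.txt", time.Now(), int64(len(data)), true, nil, f)
//	_, err := f.Put(ctx, bytes.NewReader(data), src) // finds or creates the object, then calls Update
//	// a source reporting Size() == -1 is rejected with errCanNotUploadFileWithUnknownSize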
934 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { 935 size := src.Size() 936 if size < 0 { 937 return errCanNotUploadFileWithUnknownSize 938 } 939 940 remote := o.Remote() 941 leaf, folderLinkID, err := o.fs.dirCache.FindPath(ctx, o.fs.sanitizePath(remote), true) 942 if err != nil { 943 return err 944 } 945 946 modTime := src.ModTime(ctx) 947 var linkID string 948 var fileSystemAttrs *proton.RevisionXAttrCommon 949 if err = o.fs.pacer.Call(func() (bool, error) { 950 linkID, fileSystemAttrs, err = o.fs.protonDrive.UploadFileByReader(ctx, folderLinkID, leaf, modTime, in, 0) 951 return shouldRetry(ctx, err) 952 }); err != nil { 953 return err 954 } 955 956 var sha1Hash string 957 if val, ok := fileSystemAttrs.Digests["SHA1"]; ok { 958 sha1Hash = val 959 } else { 960 sha1Hash = "" 961 } 962 963 o.id = linkID 964 o.originalSize = &fileSystemAttrs.Size 965 o.modTime = modTime 966 o.blockSizes = fileSystemAttrs.BlockSizes 967 o.digests = &sha1Hash 968 969 return nil 970 } 971 972 // Remove an object 973 func (o *Object) Remove(ctx context.Context) error { 974 return o.fs.pacer.Call(func() (bool, error) { 975 err := o.fs.protonDrive.MoveFileToTrashByID(ctx, o.id) 976 return shouldRetry(ctx, err) 977 }) 978 } 979 980 // ID returns the ID of the Object if known, or "" if not 981 func (o *Object) ID() string { 982 return o.id 983 } 984 985 // Purge all files in the directory specified 986 // 987 // Implement this if you have a way of deleting all the files 988 // quicker than just running Remove() on the result of List() 989 // 990 // Return an error if it doesn't exist 991 func (f *Fs) Purge(ctx context.Context, dir string) error { 992 root := path.Join(f.root, dir) 993 if root == "" { 994 // we can't remove the root directory, but we can list the directory and delete every folder and file in here 995 return errCanNotPurgeRootDirectory 996 } 997 998 folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) 999 if err != nil { 1000 return err 1001 } 1002 1003 if err = f.pacer.Call(func() (bool, error) { 1004 err = f.protonDrive.MoveFolderToTrashByID(ctx, folderLinkID, false) 1005 return shouldRetry(ctx, err) 1006 }); err != nil { 1007 return err 1008 } 1009 1010 f.dirCache.FlushDir(dir) 1011 return nil 1012 } 1013 1014 // MimeType of an Object if known, "" otherwise 1015 func (o *Object) MimeType(ctx context.Context) string { 1016 return o.mimetype 1017 } 1018 1019 // Disconnect the current user 1020 func (f *Fs) Disconnect(ctx context.Context) error { 1021 return f.pacer.Call(func() (bool, error) { 1022 err := f.protonDrive.Logout(ctx) 1023 return shouldRetry(ctx, err) 1024 }) 1025 } 1026 1027 // Move src to this remote using server-side move operations. 1028 // 1029 // This is stored with the remote path given. 1030 // 1031 // It returns the destination Object and a possible error. 
1032 // 1033 // Will only be called if src.Fs().Name() == f.Name() 1034 // 1035 // If it isn't possible then return fs.ErrorCantMove 1036 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 1037 srcObj, ok := src.(*Object) 1038 if !ok { 1039 fs.Debugf(src, "Can't move - not same remote type") 1040 return nil, fs.ErrorCantMove 1041 } 1042 1043 // check if the remote (dst) exists 1044 _, err := f.NewObject(ctx, remote) 1045 if err != nil { 1046 if err != fs.ErrorObjectNotFound { 1047 return nil, err 1048 } 1049 // object is indeed not found 1050 } else { 1051 // object at the dst exists 1052 return nil, fs.ErrorCantMove 1053 } 1054 1055 // attempt the move 1056 dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, f.sanitizePath(remote), true) 1057 if err != nil { 1058 return nil, err 1059 } 1060 if err = f.pacer.Call(func() (bool, error) { 1061 err = f.protonDrive.MoveFileByID(ctx, srcObj.id, dstDirectoryID, dstLeaf) 1062 return shouldRetry(ctx, err) 1063 }); err != nil { 1064 return nil, err 1065 } 1066 1067 f.dirCache.FlushDir(f.sanitizePath(src.Remote())) 1068 1069 return f.NewObject(ctx, remote) 1070 } 1071 1072 // DirMove moves src, srcRemote to this remote at dstRemote 1073 // using server-side move operations. 1074 // 1075 // Will only be called if src.Fs().Name() == f.Name() 1076 // 1077 // If it isn't possible then return fs.ErrorCantDirMove 1078 // 1079 // If destination exists then return fs.ErrorDirExists 1080 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { 1081 srcFs, ok := src.(*Fs) 1082 if !ok { 1083 fs.Debugf(srcFs, "Can't move directory - not same remote type") 1084 return fs.ErrorCantDirMove 1085 } 1086 1087 srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, f.sanitizePath(srcFs.root), f.sanitizePath(srcRemote), f.sanitizePath(f.root), f.sanitizePath(dstRemote)) 1088 if err != nil { 1089 return err 1090 } 1091 1092 if err = f.pacer.Call(func() (bool, error) { 1093 err = f.protonDrive.MoveFolderByID(ctx, srcID, dstDirectoryID, dstLeaf) 1094 return shouldRetry(ctx, err) 1095 }); err != nil { 1096 return err 1097 } 1098 1099 srcFs.dirCache.FlushDir(f.sanitizePath(srcRemote)) 1100 1101 return nil 1102 } 1103 1104 // Check the interfaces are satisfied 1105 var ( 1106 _ fs.Fs = (*Fs)(nil) 1107 _ fs.Mover = (*Fs)(nil) 1108 _ fs.DirMover = (*Fs)(nil) 1109 _ fs.DirCacheFlusher = (*Fs)(nil) 1110 _ fs.Abouter = (*Fs)(nil) 1111 _ fs.Object = (*Object)(nil) 1112 _ fs.MimeTyper = (*Object)(nil) 1113 _ fs.IDer = (*Object)(nil) 1114 )
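/*
Example (an illustrative sketch, not compiled as part of this package; the path
is made up): reading the first KiB of a file through Open. Seek/range offsets
refer to the decrypted (original) size, which is why original_file_size
defaults to true.

	obj, err := f.NewObject(ctx, "backup/archive.tar")
	if err != nil {
		return err
	}
	rc, err := obj.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return err
	}
	defer rc.Close()
	head, err := io.ReadAll(rc)
*/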