github.com/artpar/rclone@v1.67.3/backend/hidrive/hidrive.go (about) 1 // Package hidrive provides an interface to the HiDrive object storage system. 2 package hidrive 3 4 // FIXME HiDrive only supports file or folder names of 255 characters or less. 5 // Operations that create files or folders with longer names will throw an HTTP error: 6 // - 422 Unprocessable Entity 7 // A more graceful way for rclone to handle this may be desirable. 8 9 import ( 10 "context" 11 "encoding/json" 12 "errors" 13 "fmt" 14 "io" 15 "net/http" 16 "path" 17 "strconv" 18 "time" 19 20 "github.com/artpar/rclone/lib/encoder" 21 22 "github.com/artpar/rclone/backend/hidrive/api" 23 "github.com/artpar/rclone/backend/hidrive/hidrivehash" 24 "github.com/artpar/rclone/fs" 25 "github.com/artpar/rclone/fs/config" 26 "github.com/artpar/rclone/fs/config/configmap" 27 "github.com/artpar/rclone/fs/config/configstruct" 28 "github.com/artpar/rclone/fs/config/obscure" 29 "github.com/artpar/rclone/fs/fserrors" 30 "github.com/artpar/rclone/fs/hash" 31 "github.com/artpar/rclone/lib/oauthutil" 32 "github.com/artpar/rclone/lib/pacer" 33 "github.com/artpar/rclone/lib/rest" 34 "golang.org/x/oauth2" 35 ) 36 37 const ( 38 rcloneClientID = "6b0258fdda630d34db68a3ce3cbf19ae" 39 rcloneEncryptedClientSecret = "GC7UDZ3Ra4jLcmfQSagKCDJ1JEy-mU6pBBhFrS3tDEHILrK7j3TQHUrglkO5SgZ_" 40 minSleep = 10 * time.Millisecond 41 maxSleep = 2 * time.Second 42 decayConstant = 2 // bigger for slower decay, exponential 43 defaultUploadChunkSize = 48 * fs.Mebi 44 defaultUploadCutoff = 2 * defaultUploadChunkSize 45 defaultUploadConcurrency = 4 46 ) 47 48 // Globals 49 var ( 50 // Description of how to auth for this app. 
51 oauthConfig = &oauth2.Config{ 52 Endpoint: oauth2.Endpoint{ 53 AuthURL: "https://my.hidrive.com/client/authorize", 54 TokenURL: "https://my.hidrive.com/oauth2/token", 55 }, 56 ClientID: rcloneClientID, 57 ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), 58 RedirectURL: oauthutil.TitleBarRedirectURL, 59 } 60 // hidrivehashType is the hash.Type for HiDrive hashes. 61 hidrivehashType hash.Type 62 ) 63 64 // Register the backend with Fs. 65 func init() { 66 hidrivehashType = hash.RegisterHash("hidrive", "HiDriveHash", 40, hidrivehash.New) 67 fs.Register(&fs.RegInfo{ 68 Name: "hidrive", 69 Description: "HiDrive", 70 NewFs: NewFs, 71 Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { 72 // Parse config into Options struct 73 opt := new(Options) 74 err := configstruct.Set(m, opt) 75 if err != nil { 76 return nil, fmt.Errorf("couldn't parse config into struct: %w", err) 77 } 78 79 //fs.Debugf(nil, "hidrive: configuring oauth-token.") 80 oauthConfig.Scopes = createHiDriveScopes(opt.ScopeRole, opt.ScopeAccess) 81 return oauthutil.ConfigOut("", &oauthutil.Options{ 82 OAuth2Config: oauthConfig, 83 }) 84 }, 85 Options: append(oauthutil.SharedOptions, []fs.Option{{ 86 Name: "scope_access", 87 Help: "Access permissions that rclone should use when requesting access from HiDrive.", 88 Default: "rw", 89 Examples: []fs.OptionExample{{ 90 Value: "rw", 91 Help: "Read and write access to resources.", 92 }, { 93 Value: "ro", 94 Help: "Read-only access to resources.", 95 }}, 96 }, { 97 Name: "scope_role", 98 Help: "User-level that rclone should use when requesting access from HiDrive.", 99 Default: "user", 100 Examples: []fs.OptionExample{{ 101 Value: "user", 102 Help: `User-level access to management permissions. 
103 This will be sufficient in most cases.`, 104 }, { 105 Value: "admin", 106 Help: "Extensive access to management permissions.", 107 }, { 108 Value: "owner", 109 Help: "Full access to management permissions.", 110 }}, 111 Advanced: true, 112 }, { 113 Name: "root_prefix", 114 Help: `The root/parent folder for all paths. 115 116 Fill in to use the specified folder as the parent for all paths given to the remote. 117 This way rclone can use any folder as its starting point.`, 118 Default: "/", 119 Examples: []fs.OptionExample{{ 120 Value: "/", 121 Help: `The topmost directory accessible by rclone. 122 This will be equivalent with "root" if rclone uses a regular HiDrive user account.`, 123 }, { 124 Value: "root", 125 Help: `The topmost directory of the HiDrive user account`, 126 }, { 127 Value: "", 128 Help: `This specifies that there is no root-prefix for your paths. 129 When using this you will always need to specify paths to this remote with a valid parent e.g. "remote:/path/to/dir" or "remote:root/path/to/dir".`, 130 }}, 131 Advanced: true, 132 }, { 133 Name: "endpoint", 134 Help: `Endpoint for the service. 135 136 This is the URL that API-calls will be made to.`, 137 Default: "https://api.hidrive.strato.com/2.1", 138 Advanced: true, 139 }, { 140 Name: "disable_fetching_member_count", 141 Help: `Do not fetch number of objects in directories unless it is absolutely necessary. 142 143 Requests may be faster if the number of objects in subdirectories is not fetched.`, 144 Default: false, 145 Advanced: true, 146 }, { 147 Name: "chunk_size", 148 Help: fmt.Sprintf(`Chunksize for chunked uploads. 149 150 Any files larger than the configured cutoff (or files of unknown size) will be uploaded in chunks of this size. 151 152 The upper limit for this is %v bytes (about %v). 153 That is the maximum amount of bytes a single upload-operation will support. 154 Setting this above the upper limit or to a negative value will cause uploads to fail. 
155 156 Setting this to larger values may increase the upload speed at the cost of using more memory. 157 It can be set to smaller values smaller to save on memory.`, MaximumUploadBytes, fs.SizeSuffix(MaximumUploadBytes)), 158 Default: defaultUploadChunkSize, 159 Advanced: true, 160 }, { 161 Name: "upload_cutoff", 162 Help: fmt.Sprintf(`Cutoff/Threshold for chunked uploads. 163 164 Any files larger than this will be uploaded in chunks of the configured chunksize. 165 166 The upper limit for this is %v bytes (about %v). 167 That is the maximum amount of bytes a single upload-operation will support. 168 Setting this above the upper limit will cause uploads to fail.`, MaximumUploadBytes, fs.SizeSuffix(MaximumUploadBytes)), 169 Default: defaultUploadCutoff, 170 Advanced: true, 171 }, { 172 Name: "upload_concurrency", 173 Help: `Concurrency for chunked uploads. 174 175 This is the upper limit for how many transfers for the same file are running concurrently. 176 Setting this above to a value smaller than 1 will cause uploads to deadlock. 177 178 If you are uploading small numbers of large files over high-speed links 179 and these uploads do not fully utilize your bandwidth, then increasing 180 this may help to speed up the transfers.`, 181 Default: defaultUploadConcurrency, 182 Advanced: true, 183 }, { 184 Name: config.ConfigEncoding, 185 Help: config.ConfigEncodingHelp, 186 Advanced: true, 187 // HiDrive only supports file or folder names of 255 characters or less. 188 // Names containing "/" are not supported. 189 // The special names "." and ".." are not supported. 190 Default: (encoder.EncodeZero | 191 encoder.EncodeSlash | 192 encoder.EncodeDot), 193 }}...), 194 }) 195 } 196 197 // Options defines the configuration for this backend. 
type Options struct {
	// EndpointAPI is the base-URL all API-calls are made to.
	EndpointAPI string `config:"endpoint"`
	// OptionalMemberCountDisabled skips fetching the number of objects
	// in directories unless it is absolutely necessary (see List).
	OptionalMemberCountDisabled bool `config:"disable_fetching_member_count"`
	// UploadChunkSize is the size of the chunks used for chunked uploads.
	UploadChunkSize fs.SizeSuffix `config:"chunk_size"`
	// UploadCutoff is the threshold above which uploads are done in chunks.
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	// UploadConcurrency is the upper limit of transfers
	// running concurrently for the same file during chunked uploads.
	UploadConcurrency int64 `config:"upload_concurrency"`
	// Enc is the encoding applied to file and folder names.
	Enc encoder.MultiEncoder `config:"encoding"`
	// RootPrefix is the root/parent folder prepended to all paths.
	RootPrefix string `config:"root_prefix"`
	// ScopeAccess is the access-level ("rw" or "ro") requested from HiDrive.
	ScopeAccess string `config:"scope_access"`
	// ScopeRole is the user-level ("user", "admin", "owner") requested from HiDrive.
	ScopeRole string `config:"scope_role"`
}

// Fs represents a remote hidrive.
type Fs struct {
	name     string       // name of this remote
	root     string       // the path we are working on
	opt      Options      // parsed options
	features *fs.Features // optional features
	srv      *rest.Client // the connection to the server
	pacer    *fs.Pacer    // pacer for API calls
	// retryOnce is NOT intended as a pacer for API calls.
	// The intended use case is to repeat an action that failed because
	// some preconditions were not previously fulfilled.
	// Code using this should then establish these preconditions
	// and let the pacer retry the operation.
	retryOnce    *pacer.Pacer     // pacer with no delays to retry certain operations once
	tokenRenewer *oauthutil.Renew // renew the token on expiry
}

// Object describes a hidrive object.
//
// Will definitely have the remote-path but may lack meta-information.
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetadata bool      // whether info below has been set
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
	hash        string    // content-hash of the object
}

// ------------------------------------------------------------

// Name returns the name of the remote (as passed into NewFs).
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path of the remote (as passed into NewFs).
func (f *Fs) Root() string {
	return f.root
}

// String returns a string-representation of this Fs.
func (f *Fs) String() string {
	return fmt.Sprintf("HiDrive root '%s'", f.root)
}

// Precision returns the precision of this Fs.
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hidrivehashType)
}

// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}

// errorHandler parses a non 2xx error response into an error.
func errorHandler(resp *http.Response) error {
	// Decode error response.
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	// If the body carried no usable error code, fall back to the HTTP status code.
	_, err = errResponse.Code.Int64()
	if err != nil {
		errResponse.Code = json.Number(strconv.Itoa(resp.StatusCode))
	}
	return errResponse
}

// NewFs creates a new file system from the path.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	//fs.Debugf(nil, "hidrive: creating new Fs.")
	// Parse config into Options struct.
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Clean root-prefix and root-path.
	// NOTE: With the default-encoding "." and ".." will be encoded,
	// but with custom encodings without encoder.EncodeDot
	// "." and ".." will be interpreted as paths.
	if opt.RootPrefix != "" {
		opt.RootPrefix = path.Clean(opt.Enc.FromStandardPath(opt.RootPrefix))
	}
	root = path.Clean(opt.Enc.FromStandardPath(root))

	client, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to configure HiDrive: %w", err)
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		srv:       rest.NewClient(client).SetRoot(opt.EndpointAPI),
		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		retryOnce: pacer.New(pacer.RetriesOption(2), pacer.MaxConnectionsOption(-1), pacer.CalculatorOption(&pacer.ZeroDelayCalculator{})),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// If a token-source is available, renew the token by running
	// a cheap metadata-request against the resolved root.
	if ts != nil {
		transaction := func() error {
			resolvedRoot := f.resolvePath("")
			_, err := f.fetchMetadataForPath(ctx, resolvedRoot, api.HiDriveObjectNoMetadataFields)
			return err
		}
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
	}

	// The root-prefix must either be empty
	// or point to an existing directory (never a file).
	if f.opt.RootPrefix != "" {
		item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
		if err != nil {
			return nil, fmt.Errorf("could not access root-prefix: %w", err)
		}
		if item.Type != api.HiDriveObjectTypeDirectory {
			return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
		}
	}

	resolvedRoot := f.resolvePath("")
	item, err := f.fetchMetadataForPath(ctx, resolvedRoot, api.HiDriveObjectNoMetadataFields)
	if err != nil {
		if isHTTPError(err, 404) {
			// NOTE: NewFs needs to work with paths that do not exist,
			// in case they will be created later (see mkdir).
			return f, nil
		}
		return nil, fmt.Errorf("could not access root-path: %w", err)
	}
	if item.Type != api.HiDriveObjectTypeDirectory {
		fs.Debugf(f, "The root is not a directory. Setting its parent-directory as the new root.")
		// NOTE: There is no need to check
		// if the parent-directory is inside the root-prefix:
		// If the parent-directory was outside,
		// then the resolved path would be the root-prefix,
		// therefore the root-prefix would point to a file,
		// which has already been checked for.
		// In case the root-prefix is empty, this needs not be checked,
		// because top-level files cannot exist.
		f.root = path.Dir(f.root)
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// newObject constructs an Object by calling the given function metaFiller
// on an Object with no metadata.
//
// metaFiller should set the metadata of the object or
// return an appropriate error.
func (f *Fs) newObject(remote string, metaFiller func(*Object) error) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if metaFiller != nil {
		err = metaFiller(o)
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// newObjectFromHiDriveObject constructs an Object from the given api.HiDriveObject.
func (f *Fs) newObjectFromHiDriveObject(remote string, info *api.HiDriveObject) (fs.Object, error) {
	metaFiller := func(o *Object) error {
		return o.setMetadata(info)
	}
	return f.newObject(remote, metaFiller)
}

// NewObject finds the Object at remote.
//
// If remote points to a directory then it returns fs.ErrorIsDir.
// If it can not be found it returns the error fs.ErrorObjectNotFound.
404 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { 405 //fs.Debugf(f, "executing NewObject(%s).", remote) 406 metaFiller := func(o *Object) error { 407 return o.readMetadata(ctx) 408 } 409 return f.newObject(remote, metaFiller) 410 } 411 412 // List the objects and directories in dir into entries. 413 // The entries can be returned in any order, 414 // but should be for a complete directory. 415 // 416 // dir should be "" to list the root, and should not have trailing slashes. 417 // 418 // This returns fs.ErrorDirNotFound if the directory is not found. 419 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { 420 //fs.Debugf(f, "executing List(%s).", dir) 421 var iErr error 422 addEntry := func(info *api.HiDriveObject) bool { 423 fs.Debugf(f, "found directory-element with name %s", info.Name) 424 remote := path.Join(dir, info.Name) 425 if info.Type == api.HiDriveObjectTypeDirectory { 426 d := fs.NewDir(remote, info.ModTime()) 427 d.SetID(info.ID) 428 d.SetSize(info.Size) 429 d.SetItems(info.MemberCount) 430 entries = append(entries, d) 431 } else if info.Type == api.HiDriveObjectTypeFile { 432 o, err := f.newObjectFromHiDriveObject(remote, info) 433 if err != nil { 434 iErr = err 435 return true 436 } 437 entries = append(entries, o) 438 } 439 return false 440 } 441 442 var fields []string 443 if f.opt.OptionalMemberCountDisabled { 444 fields = api.HiDriveObjectWithMetadataFields 445 } else { 446 fields = api.HiDriveObjectWithDirectoryMetadataFields 447 } 448 resolvedDir := f.resolvePath(dir) 449 _, err = f.iterateOverDirectory(ctx, resolvedDir, AllMembers, addEntry, fields, Unsorted) 450 451 if err != nil { 452 if isHTTPError(err, 404) { 453 return nil, fs.ErrorDirNotFound 454 } 455 return nil, err 456 } 457 if iErr != nil { 458 return nil, iErr 459 } 460 return entries, nil 461 } 462 463 // Put the contents of the io.Reader into the remote path 464 // with the modTime given of the given size. 
465 // The existing or new object is returned. 466 // 467 // A new object may have been created or 468 // an existing one accessed even if an error is returned, 469 // in which case both the object and the error will be returned. 470 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 471 remote := src.Remote() 472 //fs.Debugf(f, "executing Put(%s, %v).", remote, options) 473 474 existingObj, err := f.NewObject(ctx, remote) 475 switch err { 476 case nil: 477 return existingObj, existingObj.Update(ctx, in, src, options...) 478 case fs.ErrorObjectNotFound: 479 // Object was not found, so create a new one. 480 return f.PutUnchecked(ctx, in, src, options...) 481 } 482 return nil, err 483 } 484 485 // PutStream uploads the contents of the io.Reader to the remote path 486 // with the modTime given of indeterminate size. 487 // The existing or new object is returned. 488 // 489 // A new object may have been created or 490 // an existing one accessed even if an error is returned, 491 // in which case both the object and the error will be returned. 492 func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 493 //fs.Debugf(f, "executing PutStream(%s, %v).", src.Remote(), options) 494 495 return f.Put(ctx, in, src, options...) 496 } 497 498 // PutUnchecked the contents of the io.Reader into the remote path 499 // with the modTime given of the given size. 500 // This guarantees that existing objects will not be overwritten. 501 // The new object is returned. 502 // 503 // This will produce an error if an object already exists at that path. 504 // 505 // In case the upload fails and an object has been created, 506 // this will try to delete the object at that path. 507 // In case the failed upload could not be deleted, 508 // both the object and the (upload-)error will be returned. 
509 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 510 remote := src.Remote() 511 modTime := src.ModTime(ctx) 512 //fs.Debugf(f, "executing PutUnchecked(%s, %v).", remote, options) 513 resolvedPath := f.resolvePath(remote) 514 515 // NOTE: The file creation operation is a single atomic operation. 516 // Thus uploading as much content as is reasonable 517 // (i.e. everything up to the cutoff) in the first request, 518 // avoids files being created on upload failure for small files. 519 // (As opposed to creating an empty file and then uploading the content.) 520 tmpReader, bytesRead, err := readerForChunk(in, int(f.opt.UploadCutoff)) 521 cutoffReader := cachedReader(tmpReader) 522 if err != nil { 523 return nil, err 524 } 525 526 var info *api.HiDriveObject 527 err = f.retryOnce.Call(func() (bool, error) { 528 var createErr error 529 // Reset the reading index (in case this is a retry). 530 if _, createErr = cutoffReader.Seek(0, io.SeekStart); createErr != nil { 531 return false, createErr 532 } 533 info, createErr = f.createFile(ctx, resolvedPath, cutoffReader, modTime, IgnoreOnExist) 534 535 if createErr == fs.ErrorDirNotFound { 536 // Create the parent-directory for the object and repeat request. 537 _, parentErr := f.createDirectories(ctx, path.Dir(resolvedPath), IgnoreOnExist) 538 if parentErr != nil && parentErr != fs.ErrorDirExists { 539 fs.Errorf(f, "Tried to create parent-directory for '%s', but failed.", resolvedPath) 540 return false, parentErr 541 } 542 return true, createErr 543 } 544 return false, createErr 545 }) 546 547 if err != nil { 548 return nil, err 549 } 550 551 o, err := f.newObjectFromHiDriveObject(remote, info) 552 553 if err != nil { 554 return nil, err 555 } 556 557 if fs.SizeSuffix(bytesRead) < f.opt.UploadCutoff { 558 return o, nil 559 } 560 // If there is more left to write, o.Update needs to skip ahead. 
561 // Use a fs.SeekOption with the current offset to do this. 562 options = append(options, &fs.SeekOption{Offset: int64(bytesRead)}) 563 err = o.Update(ctx, in, src, options...) 564 565 if err == nil { 566 return o, nil 567 } 568 569 // Try to remove object at path after the its content could not be uploaded. 570 deleteErr := f.pacer.Call(func() (bool, error) { 571 deleteErr := o.Remove(ctx) 572 return deleteErr == fs.ErrorObjectNotFound, deleteErr 573 }) 574 575 if deleteErr == nil { 576 return nil, err 577 } 578 579 fs.Errorf(f, "Tried to delete failed upload at path '%s', but failed: %v", resolvedPath, deleteErr) 580 return o, err 581 } 582 583 // Mkdir creates the directory if it does not exist. 584 // 585 // This will create any missing parent directories. 586 // 587 // NOTE: If an error occurs while the parent directories are being created, 588 // any directories already created will NOT be deleted again. 589 func (f *Fs) Mkdir(ctx context.Context, dir string) error { 590 //fs.Debugf(f, "executing Mkdir(%s).", dir) 591 resolvedDir := f.resolvePath(dir) 592 _, err := f.createDirectories(ctx, resolvedDir, IgnoreOnExist) 593 594 if err == fs.ErrorDirExists { 595 // NOTE: The conflict is caused by the directory already existing, 596 // which should be ignored here. 597 return nil 598 } 599 600 return err 601 } 602 603 // Rmdir removes the directory if empty. 604 // 605 // This returns fs.ErrorDirNotFound if the directory is not found. 606 // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty. 607 func (f *Fs) Rmdir(ctx context.Context, dir string) error { 608 //fs.Debugf(f, "executing Rmdir(%s).", dir) 609 resolvedDir := f.resolvePath(dir) 610 return f.deleteDirectory(ctx, resolvedDir, false) 611 } 612 613 // Purge removes the directory and all of its contents. 614 // 615 // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty. 
616 func (f *Fs) Purge(ctx context.Context, dir string) error { 617 //fs.Debugf(f, "executing Purge(%s).", dir) 618 resolvedDir := f.resolvePath(dir) 619 return f.deleteDirectory(ctx, resolvedDir, true) 620 } 621 622 // shouldRetryAndCreateParents returns a boolean as to whether the operation 623 // should be retried after the parent-directories of the destination have been created. 624 // If so, it will create the parent-directories. 625 // 626 // If any errors arise while finding the source or 627 // creating the parent-directory those will be returned. 628 // Otherwise returns the originalError. 629 func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) { 630 if fserrors.ContextError(ctx, &originalError) { 631 return false, originalError 632 } 633 if isHTTPError(originalError, 404) { 634 // Check if source is missing. 635 _, srcErr := f.fetchMetadataForPath(ctx, sourcePath, api.HiDriveObjectNoMetadataFields) 636 if srcErr != nil { 637 return false, srcErr 638 } 639 // Source exists, so the parent of the destination must have been missing. 640 // Create the parent-directory and repeat request. 641 _, parentErr := f.createDirectories(ctx, path.Dir(destinationPath), IgnoreOnExist) 642 if parentErr != nil && parentErr != fs.ErrorDirExists { 643 fs.Errorf(f, "Tried to create parent-directory for '%s', but failed.", destinationPath) 644 return false, parentErr 645 } 646 return true, originalError 647 } 648 return false, originalError 649 } 650 651 // Copy src to this remote using server-side copy operations. 652 // 653 // It returns the destination Object and a possible error. 654 // 655 // This returns fs.ErrorCantCopy if the operation cannot be performed. 656 // 657 // NOTE: If an error occurs when copying the Object, 658 // any parent-directories already created will NOT be deleted again. 
//
// NOTE: This operation will expand sparse areas in the content of the source-Object
// to blocks of 0-bytes in the destination-Object.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// Get the absolute path to the source.
	srcPath := srcObj.fs.resolvePath(srcObj.Remote())
	//fs.Debugf(f, "executing Copy(%s, %s).", srcPath, remote)
	dstPath := f.resolvePath(remote)

	// Retry once after creating missing destination-parents (see shouldRetryAndCreateParents).
	var info *api.HiDriveObject
	err := f.retryOnce.Call(func() (bool, error) {
		var copyErr error
		info, copyErr = f.copyFile(ctx, srcPath, dstPath, OverwriteOnExist)
		return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, copyErr)
	})

	if err != nil {
		return nil, err
	}
	dstObj, err := f.newObjectFromHiDriveObject(remote, info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// Move src to this remote using server-side move operations.
//
// It returns the destination Object and a possible error.
//
// This returns fs.ErrorCantMove if the operation cannot be performed.
//
// NOTE: If an error occurs when moving the Object,
// any parent-directories already created will NOT be deleted again.
//
// NOTE: This operation will expand sparse areas in the content of the source-Object
// to blocks of 0-bytes in the destination-Object.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Get the absolute path to the source.
	srcPath := srcObj.fs.resolvePath(srcObj.Remote())
	//fs.Debugf(f, "executing Move(%s, %s).", srcPath, remote)
	dstPath := f.resolvePath(remote)

	// Retry once after creating missing destination-parents (see shouldRetryAndCreateParents).
	var info *api.HiDriveObject
	err := f.retryOnce.Call(func() (bool, error) {
		var moveErr error
		info, moveErr = f.moveFile(ctx, srcPath, dstPath, OverwriteOnExist)
		return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, moveErr)
	})

	if err != nil {
		return nil, err
	}
	dstObj, err := f.newObjectFromHiDriveObject(remote, info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil

}

// DirMove moves from src at srcRemote to this remote at dstRemote
// using server-side move operations.
//
// This returns fs.ErrorCantDirMove if the operation cannot be performed.
// This returns fs.ErrorDirExists if the destination already exists.
//
// NOTE: If an error occurs when moving the directory,
// any parent-directories already created will NOT be deleted again.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Get the absolute path to the source.
	srcPath := srcFs.resolvePath(srcRemote)
	//fs.Debugf(f, "executing DirMove(%s, %s).", srcPath, dstRemote)
	dstPath := f.resolvePath(dstRemote)

	// Retry once after creating missing destination-parents (see shouldRetryAndCreateParents).
	err := f.retryOnce.Call(func() (bool, error) {
		var moveErr error
		_, moveErr = f.moveDirectory(ctx, srcPath, dstPath, IgnoreOnExist)
		return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, moveErr)
	})

	if err != nil {
		// A 409 conflict means the destination-directory already exists.
		if isHTTPError(err, 409) {
			return fs.ErrorDirExists
		}
		return err
	}
	return nil
}

// Shutdown shuts down the Fs, stopping the token-renewer.
//
// NOTE(review): tokenRenewer is only assigned in NewFs when a token-source
// exists, so it may be nil here; Renew.Shutdown appears to be expected to
// tolerate a nil receiver — confirm against the oauthutil package.
func (f *Fs) Shutdown(ctx context.Context) error {
	f.tokenRenewer.Shutdown()
	return nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs.
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns a string-representation of this Object.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path.
func (o *Object) Remote() string {
	return o.remote
}

// ID returns the ID of the Object if known, or "" if not.
func (o *Object) ID() string {
	err := o.readMetadata(context.TODO())
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.id
}

// Hash returns the selected checksum of the file.
// If no checksum is available it returns "".
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	err := o.readMetadata(ctx)
	if err != nil {
		return "", fmt.Errorf("failed to read hash from metadata: %w", err)
	}
	switch t {
	case hidrivehashType:
		return o.hash, nil
	default:
		return "", hash.ErrUnsupported
	}
}

// Size returns the size of an object in bytes.
817 func (o *Object) Size() int64 { 818 err := o.readMetadata(context.TODO()) 819 if err != nil { 820 fs.Logf(o, "Failed to read metadata: %v", err) 821 return -1 822 } 823 return o.size 824 } 825 826 // setMetadata sets the metadata from info. 827 func (o *Object) setMetadata(info *api.HiDriveObject) error { 828 if info.Type == api.HiDriveObjectTypeDirectory { 829 return fs.ErrorIsDir 830 } 831 if info.Type != api.HiDriveObjectTypeFile { 832 return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile) 833 } 834 o.hasMetadata = true 835 o.size = info.Size 836 o.modTime = info.ModTime() 837 o.id = info.ID 838 o.hash = info.ContentHash 839 return nil 840 } 841 842 // readMetadata fetches the metadata if it has not already been fetched. 843 func (o *Object) readMetadata(ctx context.Context) error { 844 if o.hasMetadata { 845 return nil 846 } 847 resolvedPath := o.fs.resolvePath(o.remote) 848 info, err := o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields) 849 if err != nil { 850 if isHTTPError(err, 404) { 851 return fs.ErrorObjectNotFound 852 } 853 return err 854 } 855 return o.setMetadata(info) 856 } 857 858 // ModTime returns the modification time of the object. 859 func (o *Object) ModTime(ctx context.Context) time.Time { 860 err := o.readMetadata(ctx) 861 if err != nil { 862 fs.Logf(o, "Failed to read metadata: %v", err) 863 return time.Now() 864 } 865 return o.modTime 866 } 867 868 // SetModTime sets the metadata on the object to set the modification date. 
869 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { 870 parameters := api.NewQueryParameters() 871 resolvedPath := o.fs.resolvePath(o.remote) 872 parameters.SetPath(resolvedPath) 873 err := parameters.SetTime("mtime", modTime) 874 875 if err != nil { 876 return err 877 } 878 879 opts := rest.Opts{ 880 Method: "PATCH", 881 Path: "/meta", 882 Parameters: parameters.Values, 883 NoResponse: true, 884 } 885 886 var resp *http.Response 887 err = o.fs.pacer.Call(func() (bool, error) { 888 resp, err = o.fs.srv.Call(ctx, &opts) 889 return o.fs.shouldRetry(ctx, resp, err) 890 }) 891 if err != nil { 892 return err 893 } 894 o.modTime = modTime 895 return nil 896 } 897 898 // Storable says whether this object can be stored. 899 func (o *Object) Storable() bool { 900 return true 901 } 902 903 // Open an object for reading. 904 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { 905 parameters := api.NewQueryParameters() 906 resolvedPath := o.fs.resolvePath(o.remote) 907 parameters.SetPath(resolvedPath) 908 909 fs.FixRangeOption(options, o.Size()) 910 opts := rest.Opts{ 911 Method: "GET", 912 Path: "/file", 913 Parameters: parameters.Values, 914 Options: options, 915 } 916 var resp *http.Response 917 var err error 918 err = o.fs.pacer.Call(func() (bool, error) { 919 resp, err = o.fs.srv.Call(ctx, &opts) 920 return o.fs.shouldRetry(ctx, resp, err) 921 }) 922 if err != nil { 923 return nil, err 924 } 925 return resp.Body, err 926 } 927 928 // Update the existing object 929 // with the contents of the io.Reader, modTime and size. 930 // 931 // For unknown-sized contents (indicated by src.Size() == -1) 932 // this will try to properly upload it in multiple chunks. 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	//fs.Debugf(o.fs, "executing Update(%s, %v).", o.remote, options)
	modTime := src.ModTime(ctx)
	resolvedPath := o.fs.resolvePath(o.remote)

	// Keep the OAuth-token fresh for the duration of the upload.
	if o.fs.tokenRenewer != nil {
		o.fs.tokenRenewer.Start()
		defer o.fs.tokenRenewer.Stop()
	}

	// PutUnchecked can pass a valid SeekOption to skip ahead.
	var offset uint64
	for _, option := range options {
		if seekoption, ok := option.(*fs.SeekOption); ok {
			offset = uint64(seekoption.Offset)
			break
		}
	}

	var info *api.HiDriveObject
	var err, metaErr error
	// Use the chunked-upload path when skipping ahead,
	// when the size is unknown, or when the size reaches the cutoff.
	if offset > 0 || src.Size() == -1 || src.Size() >= int64(o.fs.opt.UploadCutoff) {
		fs.Debugf(o.fs, "Uploading with chunks of size %v and %v transfers in parallel at path '%s'.", int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency, resolvedPath)
		// NOTE: o.fs.opt.UploadChunkSize should always
		// be between 0 and MaximumUploadBytes,
		// so the conversion to an int does not cause problems for valid inputs.
		if offset > 0 {
			// NOTE: The offset is only set
			// when the file was newly created,
			// therefore the file does not need truncating.
			_, err = o.fs.updateFileChunked(ctx, resolvedPath, in, offset, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
			if err == nil {
				err = o.SetModTime(ctx, modTime)
			}
		} else {
			_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
		}
		// Try to check if object was updated, either way.
		// Metadata should be updated even if the upload fails.
		info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
	} else {
		// Small known-size content: overwrite in a single request.
		info, err = o.fs.overwriteFile(ctx, resolvedPath, cachedReader(in), modTime)
		metaErr = err
	}

	// Update metadata of this object,
	// if there was no error with getting the metadata.
	if metaErr == nil {
		metaErr = o.setMetadata(info)
	}

	// Errors with the upload-process are more relevant, return those first.
	if err != nil {
		return err
	}
	return metaErr
}

// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
	resolvedPath := o.fs.resolvePath(o.remote)
	return o.fs.deleteObject(ctx, resolvedPath)
}

// Check the interfaces are satisfied.
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.PutStreamer    = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
	_ fs.Copier         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.Shutdowner     = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
	_ fs.IDer           = (*Object)(nil)
)