github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/fs/operations/operations.go (about) 1 // Package operations does generic operations on filesystems and objects 2 package operations 3 4 import ( 5 "bytes" 6 "context" 7 "encoding/base64" 8 "encoding/csv" 9 "encoding/hex" 10 "fmt" 11 "io" 12 "io/ioutil" 13 "net/http" 14 "path" 15 "path/filepath" 16 "sort" 17 "strconv" 18 "strings" 19 "sync" 20 "sync/atomic" 21 "time" 22 23 "github.com/pkg/errors" 24 "github.com/rclone/rclone/fs" 25 "github.com/rclone/rclone/fs/accounting" 26 "github.com/rclone/rclone/fs/cache" 27 "github.com/rclone/rclone/fs/fserrors" 28 "github.com/rclone/rclone/fs/fshttp" 29 "github.com/rclone/rclone/fs/hash" 30 "github.com/rclone/rclone/fs/march" 31 "github.com/rclone/rclone/fs/object" 32 "github.com/rclone/rclone/fs/walk" 33 "github.com/rclone/rclone/lib/random" 34 "github.com/rclone/rclone/lib/readers" 35 "golang.org/x/sync/errgroup" 36 ) 37 38 // CheckHashes checks the two files to see if they have common 39 // known hash types and compares them 40 // 41 // Returns 42 // 43 // equal - which is equality of the hashes 44 // 45 // hash - the HashType. This is HashNone if either of the hashes were 46 // unset or a compatible hash couldn't be found. 47 // 48 // err - may return an error which will already have been logged 49 // 50 // If an error is returned it will return equal as false 51 func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { 52 common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) 53 // fs.Debugf(nil, "Shared hashes: %v", common) 54 if common.Count() == 0 { 55 return true, hash.None, nil 56 } 57 equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne()) 58 return equal, ht, err 59 } 60 61 // checkHashes does the work of CheckHashes but takes a hash.Type and 62 // returns the effective hash type used. 
63 func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) { 64 // Calculate hashes in parallel 65 g, ctx := errgroup.WithContext(ctx) 66 g.Go(func() (err error) { 67 srcHash, err = src.Hash(ctx, ht) 68 if err != nil { 69 err = fs.CountError(err) 70 fs.Errorf(src, "Failed to calculate src hash: %v", err) 71 } 72 return err 73 }) 74 g.Go(func() (err error) { 75 dstHash, err = dst.Hash(ctx, ht) 76 if err != nil { 77 err = fs.CountError(err) 78 fs.Errorf(dst, "Failed to calculate dst hash: %v", err) 79 } 80 return err 81 }) 82 err = g.Wait() 83 if err != nil { 84 return false, ht, srcHash, dstHash, err 85 } 86 if srcHash == "" { 87 return true, hash.None, srcHash, dstHash, nil 88 } 89 if dstHash == "" { 90 return true, hash.None, srcHash, dstHash, nil 91 } 92 if srcHash != dstHash { 93 fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs()) 94 fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs()) 95 } else { 96 fs.Debugf(src, "%v = %s OK", ht, srcHash) 97 } 98 return srcHash == dstHash, ht, srcHash, dstHash, nil 99 } 100 101 // Equal checks to see if the src and dst objects are equal by looking at 102 // size, mtime and hash 103 // 104 // If the src and dst size are different then it is considered to be 105 // not equal. If --size-only is in effect then this is the only check 106 // that is done. If --ignore-size is in effect then this check is 107 // skipped and the files are considered the same size. 108 // 109 // If the size is the same and the mtime is the same then it is 110 // considered to be equal. This check is skipped if using --checksum. 111 // 112 // If the size is the same and mtime is different, unreadable or 113 // --checksum is set and the hash is the same then the file is 114 // considered to be equal. In this case the mtime on the dst is 115 // updated if --checksum is not set. 
116 // 117 // Otherwise the file is considered to be not equal including if there 118 // were errors reading info. 119 func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { 120 return equal(ctx, src, dst, defaultEqualOpt()) 121 } 122 123 // sizeDiffers compare the size of src and dst taking into account the 124 // various ways of ignoring sizes 125 func sizeDiffers(src, dst fs.ObjectInfo) bool { 126 if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { 127 return false 128 } 129 return src.Size() != dst.Size() 130 } 131 132 var checksumWarning sync.Once 133 134 // options for equal function() 135 type equalOpt struct { 136 sizeOnly bool // if set only check size 137 checkSum bool // if set check checksum+size instead of modtime+size 138 updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size 139 forceModTimeMatch bool // if set assume modtimes match 140 } 141 142 // default set of options for equal() 143 func defaultEqualOpt() equalOpt { 144 return equalOpt{ 145 sizeOnly: fs.Config.SizeOnly, 146 checkSum: fs.Config.CheckSum, 147 updateModTime: !fs.Config.NoUpdateModTime, 148 forceModTimeMatch: false, 149 } 150 } 151 152 func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { 153 if sizeDiffers(src, dst) { 154 fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) 155 return false 156 } 157 if opt.sizeOnly { 158 fs.Debugf(src, "Sizes identical") 159 return true 160 } 161 162 // Assert: Size is equal or being ignored 163 164 // If checking checksum and not modtime 165 if opt.checkSum { 166 // Check the hash 167 same, ht, _ := CheckHashes(ctx, src, dst) 168 if !same { 169 fs.Debugf(src, "%v differ", ht) 170 return false 171 } 172 if ht == hash.None { 173 checksumWarning.Do(func() { 174 fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only") 175 }) 176 fs.Debugf(src, "Size of src 
and dst objects identical") 177 } else { 178 fs.Debugf(src, "Size and %v of src and dst objects identical", ht) 179 } 180 return true 181 } 182 183 srcModTime := src.ModTime(ctx) 184 if !opt.forceModTimeMatch { 185 // Sizes the same so check the mtime 186 modifyWindow := fs.GetModifyWindow(src.Fs(), dst.Fs()) 187 if modifyWindow == fs.ModTimeNotSupported { 188 fs.Debugf(src, "Sizes identical") 189 return true 190 } 191 dstModTime := dst.ModTime(ctx) 192 dt := dstModTime.Sub(srcModTime) 193 if dt < modifyWindow && dt > -modifyWindow { 194 fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow) 195 return true 196 } 197 198 fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) 199 } 200 201 // Check if the hashes are the same 202 same, ht, _ := CheckHashes(ctx, src, dst) 203 if !same { 204 fs.Debugf(src, "%v differ", ht) 205 return false 206 } 207 if ht == hash.None { 208 // if couldn't check hash, return that they differ 209 return false 210 } 211 212 // mod time differs but hash is the same to reset mod time if required 213 if opt.updateModTime { 214 if fs.Config.DryRun { 215 fs.Logf(src, "Not updating modification time as --dry-run") 216 } else { 217 // Size and hash the same but mtime different 218 // Error if objects are treated as immutable 219 if fs.Config.Immutable { 220 fs.Errorf(dst, "StartedAt mismatch between immutable objects") 221 return false 222 } 223 // Update the mtime of the dst object here 224 err := dst.SetModTime(ctx, srcModTime) 225 if err == fs.ErrorCantSetModTime { 226 fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading") 227 return false 228 } else if err == fs.ErrorCantSetModTimeWithoutDelete { 229 fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading") 230 // Remove the file if BackupDir isn't set. 
If BackupDir is set we would rather have the old file 231 // put in the BackupDir than deleted which is what will happen if we don't delete it. 232 if fs.Config.BackupDir == "" { 233 err = dst.Remove(ctx) 234 if err != nil { 235 fs.Errorf(dst, "failed to delete before re-upload: %v", err) 236 } 237 } 238 return false 239 } else if err != nil { 240 err = fs.CountError(err) 241 fs.Errorf(dst, "Failed to set modification time: %v", err) 242 } else { 243 fs.Infof(src, "Updated modification time in destination") 244 } 245 } 246 } 247 return true 248 } 249 250 // Used to remove a failed copy 251 // 252 // Returns whether the file was successfully removed or not 253 func removeFailedCopy(ctx context.Context, dst fs.Object) bool { 254 if dst == nil { 255 return false 256 } 257 fs.Infof(dst, "Removing failed copy") 258 removeErr := dst.Remove(ctx) 259 if removeErr != nil { 260 fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) 261 return false 262 } 263 return true 264 } 265 266 // OverrideRemote is a wrapper to override the Remote for an 267 // ObjectInfo 268 type OverrideRemote struct { 269 fs.ObjectInfo 270 remote string 271 } 272 273 // NewOverrideRemote returns an OverrideRemoteObject which will 274 // return the remote specified 275 func NewOverrideRemote(oi fs.ObjectInfo, remote string) *OverrideRemote { 276 return &OverrideRemote{ 277 ObjectInfo: oi, 278 remote: remote, 279 } 280 } 281 282 // Remote returns the overridden remote name 283 func (o *OverrideRemote) Remote() string { 284 return o.remote 285 } 286 287 // MimeType returns the mime type of the underlying object or "" if it 288 // can't be worked out 289 func (o *OverrideRemote) MimeType(ctx context.Context) string { 290 if do, ok := o.ObjectInfo.(fs.MimeTyper); ok { 291 return do.MimeType(ctx) 292 } 293 return "" 294 } 295 296 // ID returns the ID of the Object if known, or "" if not 297 func (o *OverrideRemote) ID() string { 298 if do, ok := o.ObjectInfo.(fs.IDer); ok { 299 return do.ID() 300 } 
301 return "" 302 } 303 304 // UnWrap returns the Object that this Object is wrapping or nil if it 305 // isn't wrapping anything 306 func (o *OverrideRemote) UnWrap() fs.Object { 307 if o, ok := o.ObjectInfo.(fs.Object); ok { 308 return o 309 } 310 return nil 311 } 312 313 // GetTier returns storage tier or class of the Object 314 func (o *OverrideRemote) GetTier() string { 315 if do, ok := o.ObjectInfo.(fs.GetTierer); ok { 316 return do.GetTier() 317 } 318 return "" 319 } 320 321 // Check all optional interfaces satisfied 322 var _ fs.FullObjectInfo = (*OverrideRemote)(nil) 323 324 // Copy src object to dst or f if nil. If dst is nil then it uses 325 // remote as the name of the new object. 326 // 327 // It returns the destination object if possible. Note that this may 328 // be nil. 329 func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { 330 tr := accounting.Stats(ctx).NewTransfer(src) 331 defer func() { 332 tr.Done(err) 333 }() 334 newDst = dst 335 if fs.Config.DryRun { 336 fs.Logf(src, "Not copying as --dry-run") 337 return newDst, nil 338 } 339 maxTries := fs.Config.LowLevelRetries 340 tries := 0 341 doUpdate := dst != nil 342 // work out which hash to use - limit to 1 hash in common 343 var common hash.Set 344 hashType := hash.None 345 if !fs.Config.IgnoreChecksum { 346 common = src.Fs().Hashes().Overlap(f.Hashes()) 347 if common.Count() > 0 { 348 hashType = common.GetOne() 349 common = hash.Set(hashType) 350 } 351 } 352 hashOption := &fs.HashesOption{Hashes: common} 353 var actionTaken string 354 for { 355 // Try server side copy first - if has optional interface and 356 // is same underlying remote 357 actionTaken = "Copied (server side copy)" 358 if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) { 359 // Check transfer limit for server side copies 360 if fs.Config.MaxTransfer >= 0 && 
accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) { 361 return nil, accounting.ErrorMaxTransferLimitReached 362 } 363 in := tr.Account(nil) // account the transfer 364 in.ServerSideCopyStart() 365 newDst, err = doCopy(ctx, src, remote) 366 if err == nil { 367 dst = newDst 368 in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server side transfer 369 err = in.Close() 370 } else { 371 _ = in.Close() 372 } 373 if err == fs.ErrorCantCopy { 374 tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below 375 } 376 } else { 377 err = fs.ErrorCantCopy 378 } 379 // If can't server side copy, do it manually 380 if err == fs.ErrorCantCopy { 381 if doMultiThreadCopy(f, src) { 382 // Number of streams proportional to size 383 streams := src.Size() / int64(fs.Config.MultiThreadCutoff) 384 // With maximum 385 if streams > int64(fs.Config.MultiThreadStreams) { 386 streams = int64(fs.Config.MultiThreadStreams) 387 } 388 if streams < 2 { 389 streams = 2 390 } 391 dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr) 392 if doUpdate { 393 actionTaken = "Multi-thread Copied (replaced existing)" 394 } else { 395 actionTaken = "Multi-thread Copied (new)" 396 } 397 } else { 398 var in0 io.ReadCloser 399 in0, err = newReOpen(ctx, src, hashOption, nil, fs.Config.LowLevelRetries) 400 if err != nil { 401 err = errors.Wrap(err, "failed to open source object") 402 } else { 403 if src.Size() == -1 { 404 // -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream. 
405 if doUpdate { 406 actionTaken = "Copied (Rcat, replaced existing)" 407 } else { 408 actionTaken = "Copied (Rcat, new)" 409 } 410 // NB Rcat closes in0 411 dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx)) 412 newDst = dst 413 } else { 414 in := tr.Account(in0).WithBuffer() // account and buffer the transfer 415 var wrappedSrc fs.ObjectInfo = src 416 // We try to pass the original object if possible 417 if src.Remote() != remote { 418 wrappedSrc = NewOverrideRemote(src, remote) 419 } 420 if doUpdate { 421 actionTaken = "Copied (replaced existing)" 422 err = dst.Update(ctx, in, wrappedSrc, hashOption) 423 } else { 424 actionTaken = "Copied (new)" 425 dst, err = f.Put(ctx, in, wrappedSrc, hashOption) 426 } 427 closeErr := in.Close() 428 if err == nil { 429 newDst = dst 430 err = closeErr 431 } 432 } 433 } 434 } 435 } 436 tries++ 437 if tries >= maxTries { 438 break 439 } 440 // Retry if err returned a retry error 441 if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { 442 fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries) 443 tr.Reset() // skip incomplete accounting - will be overwritten by retry 444 continue 445 } 446 // otherwise finish 447 break 448 } 449 if err != nil { 450 err = fs.CountError(err) 451 fs.Errorf(src, "Failed to copy: %v", err) 452 return newDst, err 453 } 454 455 // Verify sizes are the same after transfer 456 if sizeDiffers(src, dst) { 457 err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) 458 fs.Errorf(dst, "%v", err) 459 err = fs.CountError(err) 460 removeFailedCopy(ctx, dst) 461 return newDst, err 462 } 463 464 // Verify hashes are the same after transfer - ignoring blank hashes 465 if hashType != hash.None { 466 // checkHashes has logged and counted errors 467 equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType) 468 if !equal { 469 err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum) 470 
fs.Errorf(dst, "%v", err) 471 err = fs.CountError(err) 472 removeFailedCopy(ctx, dst) 473 return newDst, err 474 } 475 } 476 477 fs.Infof(src, actionTaken) 478 return newDst, err 479 } 480 481 // SameObject returns true if src and dst could be pointing to the 482 // same object. 483 func SameObject(src, dst fs.Object) bool { 484 if !SameConfig(src.Fs(), dst.Fs()) { 485 return false 486 } 487 srcPath := path.Join(src.Fs().Root(), src.Remote()) 488 dstPath := path.Join(dst.Fs().Root(), dst.Remote()) 489 if dst.Fs().Features().CaseInsensitive { 490 srcPath = strings.ToLower(srcPath) 491 dstPath = strings.ToLower(dstPath) 492 } 493 return srcPath == dstPath 494 } 495 496 // Move src object to dst or fdst if nil. If dst is nil then it uses 497 // remote as the name of the new object. 498 // 499 // Note that you must check the destination does not exist before 500 // calling this and pass it as dst. If you pass dst=nil and the 501 // destination does exist then this may create duplicates or return 502 // errors. 503 // 504 // It returns the destination object if possible. Note that this may 505 // be nil. 
506 func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { 507 tr := accounting.Stats(ctx).NewCheckingTransfer(src) 508 defer func() { 509 tr.Done(err) 510 }() 511 newDst = dst 512 if fs.Config.DryRun { 513 fs.Logf(src, "Not moving as --dry-run") 514 return newDst, nil 515 } 516 // See if we have Move available 517 if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) { 518 // Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive) 519 if dst != nil && !SameObject(src, dst) { 520 err = DeleteFile(ctx, dst) 521 if err != nil { 522 return newDst, err 523 } 524 } 525 // Move dst <- src 526 newDst, err = doMove(ctx, src, remote) 527 switch err { 528 case nil: 529 fs.Infof(src, "Moved (server side)") 530 return newDst, nil 531 case fs.ErrorCantMove: 532 fs.Debugf(src, "Can't move, switching to copy") 533 default: 534 err = fs.CountError(err) 535 fs.Errorf(src, "Couldn't move: %v", err) 536 return newDst, err 537 } 538 } 539 // Move not found or didn't work so copy dst <- src 540 newDst, err = Copy(ctx, fdst, dst, remote, src) 541 if err != nil { 542 fs.Errorf(src, "Not deleting source as copy failed: %v", err) 543 return newDst, err 544 } 545 // Delete src if no error on copy 546 return newDst, DeleteFile(ctx, src) 547 } 548 549 // CanServerSideMove returns true if fdst support server side moves or 550 // server side copies 551 // 552 // Some remotes simulate rename by server-side copy and delete, so include 553 // remotes that implements either Mover or Copier. 
554 func CanServerSideMove(fdst fs.Fs) bool { 555 canMove := fdst.Features().Move != nil 556 canCopy := fdst.Features().Copy != nil 557 return canMove || canCopy 558 } 559 560 // SuffixName adds the current --suffix to the remote, obeying 561 // --suffix-keep-extension if set 562 func SuffixName(remote string) string { 563 if fs.Config.Suffix == "" { 564 return remote 565 } 566 if fs.Config.SuffixKeepExtension { 567 ext := path.Ext(remote) 568 base := remote[:len(remote)-len(ext)] 569 return base + fs.Config.Suffix + ext 570 } 571 return remote + fs.Config.Suffix 572 } 573 574 // DeleteFileWithBackupDir deletes a single file respecting --dry-run 575 // and accumulating stats and errors. 576 // 577 // If backupDir is set then it moves the file to there instead of 578 // deleting 579 func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) { 580 tr := accounting.Stats(ctx).NewCheckingTransfer(dst) 581 defer func() { 582 tr.Done(err) 583 }() 584 numDeletes := accounting.Stats(ctx).Deletes(1) 585 if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete { 586 return fserrors.FatalError(errors.New("--max-delete threshold reached")) 587 } 588 action, actioned, actioning := "delete", "Deleted", "deleting" 589 if backupDir != nil { 590 action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir" 591 } 592 if fs.Config.DryRun { 593 fs.Logf(dst, "Not %s as --dry-run", actioning) 594 } else if backupDir != nil { 595 err = MoveBackupDir(ctx, backupDir, dst) 596 } else { 597 err = dst.Remove(ctx) 598 } 599 if err != nil { 600 fs.Errorf(dst, "Couldn't %s: %v", action, err) 601 err = fs.CountError(err) 602 } else if !fs.Config.DryRun { 603 fs.Infof(dst, actioned) 604 } 605 return err 606 } 607 608 // DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors. 
609 // 610 // If useBackupDir is set and --backup-dir is in effect then it moves 611 // the file to there instead of deleting 612 func DeleteFile(ctx context.Context, dst fs.Object) (err error) { 613 return DeleteFileWithBackupDir(ctx, dst, nil) 614 } 615 616 // DeleteFilesWithBackupDir removes all the files passed in the 617 // channel 618 // 619 // If backupDir is set the files will be placed into that directory 620 // instead of being deleted. 621 func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { 622 var wg sync.WaitGroup 623 wg.Add(fs.Config.Transfers) 624 var errorCount int32 625 var fatalErrorCount int32 626 627 for i := 0; i < fs.Config.Transfers; i++ { 628 go func() { 629 defer wg.Done() 630 for dst := range toBeDeleted { 631 err := DeleteFileWithBackupDir(ctx, dst, backupDir) 632 if err != nil { 633 atomic.AddInt32(&errorCount, 1) 634 if fserrors.IsFatalError(err) { 635 fs.Errorf(nil, "Got fatal error on delete: %s", err) 636 atomic.AddInt32(&fatalErrorCount, 1) 637 return 638 } 639 } 640 } 641 }() 642 } 643 fs.Infof(nil, "Waiting for deletions to finish") 644 wg.Wait() 645 if errorCount > 0 { 646 err := errors.Errorf("failed to delete %d files", errorCount) 647 if fatalErrorCount > 0 { 648 return fserrors.FatalError(err) 649 } 650 return err 651 } 652 return nil 653 } 654 655 // DeleteFiles removes all the files passed in the channel 656 func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error { 657 return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil) 658 } 659 660 // SameRemoteType returns true if fdst and fsrc are the same type 661 func SameRemoteType(fdst, fsrc fs.Info) bool { 662 return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc) 663 } 664 665 // SameConfig returns true if fdst and fsrc are using the same config 666 // file entry 667 func SameConfig(fdst, fsrc fs.Info) bool { 668 return fdst.Name() == fsrc.Name() 669 } 670 671 // Same returns true if fdst and fsrc point to 
the same underlying Fs 672 func Same(fdst, fsrc fs.Info) bool { 673 return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/") 674 } 675 676 // fixRoot returns the Root with a trailing / if not empty. It is 677 // aware of case insensitive filesystems. 678 func fixRoot(f fs.Info) string { 679 s := strings.Trim(filepath.ToSlash(f.Root()), "/") 680 if s != "" { 681 s += "/" 682 } 683 if f.Features().CaseInsensitive { 684 s = strings.ToLower(s) 685 } 686 return s 687 } 688 689 // Overlapping returns true if fdst and fsrc point to the same 690 // underlying Fs and they overlap. 691 func Overlapping(fdst, fsrc fs.Info) bool { 692 if !SameConfig(fdst, fsrc) { 693 return false 694 } 695 fdstRoot := fixRoot(fdst) 696 fsrcRoot := fixRoot(fsrc) 697 return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot) 698 } 699 700 // SameDir returns true if fdst and fsrc point to the same 701 // underlying Fs and they are the same directory. 702 func SameDir(fdst, fsrc fs.Info) bool { 703 if !SameConfig(fdst, fsrc) { 704 return false 705 } 706 fdstRoot := fixRoot(fdst) 707 fsrcRoot := fixRoot(fsrc) 708 return fdstRoot == fsrcRoot 709 } 710 711 // checkIdentical checks to see if dst and src are identical 712 // 713 // it returns true if differences were found 714 // it also returns whether it couldn't be hashed 715 func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) { 716 same, ht, err := CheckHashes(ctx, src, dst) 717 if err != nil { 718 // CheckHashes will log and count errors 719 return true, false 720 } 721 if ht == hash.None { 722 return false, true 723 } 724 if !same { 725 err = errors.Errorf("%v differ", ht) 726 fs.Errorf(src, "%v", err) 727 _ = fs.CountError(err) 728 return true, false 729 } 730 return false, false 731 } 732 733 // checkFn is the the type of the checking function used in CheckFn() 734 type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash 
bool) 735 736 // checkMarch is used to march over two Fses in the same way as 737 // sync/copy 738 type checkMarch struct { 739 fdst, fsrc fs.Fs 740 check checkFn 741 oneway bool 742 differences int32 743 noHashes int32 744 srcFilesMissing int32 745 dstFilesMissing int32 746 matches int32 747 } 748 749 // DstOnly have an object which is in the destination only 750 func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { 751 switch dst.(type) { 752 case fs.Object: 753 if c.oneway { 754 return false 755 } 756 err := errors.Errorf("File not in %v", c.fsrc) 757 fs.Errorf(dst, "%v", err) 758 _ = fs.CountError(err) 759 atomic.AddInt32(&c.differences, 1) 760 atomic.AddInt32(&c.srcFilesMissing, 1) 761 case fs.Directory: 762 // Do the same thing to the entire contents of the directory 763 if c.oneway { 764 return false 765 } 766 return true 767 default: 768 panic("Bad object in DirEntries") 769 } 770 return false 771 } 772 773 // SrcOnly have an object which is in the source only 774 func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) { 775 switch src.(type) { 776 case fs.Object: 777 err := errors.Errorf("File not in %v", c.fdst) 778 fs.Errorf(src, "%v", err) 779 _ = fs.CountError(err) 780 atomic.AddInt32(&c.differences, 1) 781 atomic.AddInt32(&c.dstFilesMissing, 1) 782 case fs.Directory: 783 // Do the same thing to the entire contents of the directory 784 return true 785 default: 786 panic("Bad object in DirEntries") 787 } 788 return false 789 } 790 791 // check to see if two objects are identical using the check function 792 func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) { 793 var err error 794 tr := accounting.Stats(ctx).NewCheckingTransfer(src) 795 defer func() { 796 tr.Done(err) 797 }() 798 if sizeDiffers(src, dst) { 799 err = errors.Errorf("Sizes differ") 800 fs.Errorf(src, "%v", err) 801 return true, false 802 } 803 if fs.Config.SizeOnly { 804 return false, false 805 } 806 return c.check(ctx, 
dst, src) 807 } 808 809 // Match is called when src and dst are present, so sync src to dst 810 func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) { 811 switch srcX := src.(type) { 812 case fs.Object: 813 dstX, ok := dst.(fs.Object) 814 if ok { 815 differ, noHash := c.checkIdentical(ctx, dstX, srcX) 816 if differ { 817 atomic.AddInt32(&c.differences, 1) 818 } else { 819 atomic.AddInt32(&c.matches, 1) 820 if noHash { 821 atomic.AddInt32(&c.noHashes, 1) 822 fs.Debugf(dstX, "OK - could not check hash") 823 } else { 824 fs.Debugf(dstX, "OK") 825 } 826 } 827 } else { 828 err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst) 829 fs.Errorf(src, "%v", err) 830 _ = fs.CountError(err) 831 atomic.AddInt32(&c.differences, 1) 832 atomic.AddInt32(&c.dstFilesMissing, 1) 833 } 834 case fs.Directory: 835 // Do the same thing to the entire contents of the directory 836 _, ok := dst.(fs.Directory) 837 if ok { 838 return true 839 } 840 err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc) 841 fs.Errorf(dst, "%v", err) 842 _ = fs.CountError(err) 843 atomic.AddInt32(&c.differences, 1) 844 atomic.AddInt32(&c.srcFilesMissing, 1) 845 846 default: 847 panic("Bad object in DirEntries") 848 } 849 return false 850 } 851 852 // CheckFn checks the files in fsrc and fdst according to Size and 853 // hash using checkFunction on each file to check the hashes. 
854 // 855 // checkFunction sees if dst and src are identical 856 // 857 // it returns true if differences were found 858 // it also returns whether it couldn't be hashed 859 func CheckFn(ctx context.Context, fdst, fsrc fs.Fs, check checkFn, oneway bool) error { 860 c := &checkMarch{ 861 fdst: fdst, 862 fsrc: fsrc, 863 check: check, 864 oneway: oneway, 865 } 866 867 // set up a march over fdst and fsrc 868 m := &march.March{ 869 Ctx: ctx, 870 Fdst: fdst, 871 Fsrc: fsrc, 872 Dir: "", 873 Callback: c, 874 } 875 fs.Infof(fdst, "Waiting for checks to finish") 876 err := m.Run() 877 878 if c.dstFilesMissing > 0 { 879 fs.Logf(fdst, "%d files missing", c.dstFilesMissing) 880 } 881 if c.srcFilesMissing > 0 { 882 fs.Logf(fsrc, "%d files missing", c.srcFilesMissing) 883 } 884 885 fs.Logf(fdst, "%d differences found", accounting.Stats(ctx).GetErrors()) 886 if c.noHashes > 0 { 887 fs.Logf(fdst, "%d hashes could not be checked", c.noHashes) 888 } 889 if c.matches > 0 { 890 fs.Logf(fdst, "%d matching files", c.matches) 891 } 892 if c.differences > 0 { 893 return errors.Errorf("%d differences found", c.differences) 894 } 895 return err 896 } 897 898 // Check the files in fsrc and fdst according to Size and hash 899 func Check(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error { 900 return CheckFn(ctx, fdst, fsrc, checkIdentical, oneway) 901 } 902 903 // CheckEqualReaders checks to see if in1 and in2 have the same 904 // content when read. 
905 // 906 // it returns true if differences were found 907 func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) { 908 const bufSize = 64 * 1024 909 buf1 := make([]byte, bufSize) 910 buf2 := make([]byte, bufSize) 911 for { 912 n1, err1 := readers.ReadFill(in1, buf1) 913 n2, err2 := readers.ReadFill(in2, buf2) 914 // check errors 915 if err1 != nil && err1 != io.EOF { 916 return true, err1 917 } else if err2 != nil && err2 != io.EOF { 918 return true, err2 919 } 920 // err1 && err2 are nil or io.EOF here 921 // process the data 922 if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) { 923 return true, nil 924 } 925 // if both streams finished the we have finished 926 if err1 == io.EOF && err2 == io.EOF { 927 break 928 } 929 } 930 return false, nil 931 } 932 933 // CheckIdentical checks to see if dst and src are identical by 934 // reading all their bytes if necessary. 935 // 936 // it returns true if differences were found 937 func CheckIdentical(ctx context.Context, dst, src fs.Object) (differ bool, err error) { 938 in1, err := dst.Open(ctx) 939 if err != nil { 940 return true, errors.Wrapf(err, "failed to open %q", dst) 941 } 942 tr1 := accounting.Stats(ctx).NewTransfer(dst) 943 defer func() { 944 tr1.Done(err) 945 }() 946 in1 = tr1.Account(in1).WithBuffer() // account and buffer the transfer 947 948 in2, err := src.Open(ctx) 949 if err != nil { 950 return true, errors.Wrapf(err, "failed to open %q", src) 951 } 952 tr2 := accounting.Stats(ctx).NewTransfer(dst) 953 defer func() { 954 tr2.Done(err) 955 }() 956 in2 = tr2.Account(in2).WithBuffer() // account and buffer the transfer 957 958 // To assign err variable before defer. 959 differ, err = CheckEqualReaders(in1, in2) 960 return 961 } 962 963 // CheckDownload checks the files in fsrc and fdst according to Size 964 // and the actual contents of the files. 
965 func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error { 966 check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) { 967 differ, err := CheckIdentical(ctx, a, b) 968 if err != nil { 969 err = fs.CountError(err) 970 fs.Errorf(a, "Failed to download: %v", err) 971 return true, true 972 } 973 return differ, false 974 } 975 return CheckFn(ctx, fdst, fsrc, check, oneway) 976 } 977 978 // ListFn lists the Fs to the supplied function 979 // 980 // Lists in parallel which may get them out of order 981 func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { 982 return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { 983 entries.ForObject(fn) 984 return nil 985 }) 986 } 987 988 // mutex for synchronized output 989 var outMutex sync.Mutex 990 991 // Synchronized fmt.Fprintf 992 // 993 // Ignores errors from Fprintf 994 func syncFprintf(w io.Writer, format string, a ...interface{}) { 995 outMutex.Lock() 996 defer outMutex.Unlock() 997 _, _ = fmt.Fprintf(w, format, a...) 
998 } 999 1000 // List the Fs to the supplied writer 1001 // 1002 // Shows size and path - obeys includes and excludes 1003 // 1004 // Lists in parallel which may get them out of order 1005 func List(ctx context.Context, f fs.Fs, w io.Writer) error { 1006 return ListFn(ctx, f, func(o fs.Object) { 1007 syncFprintf(w, "%9d %s\n", o.Size(), o.Remote()) 1008 }) 1009 } 1010 1011 // ListLong lists the Fs to the supplied writer 1012 // 1013 // Shows size, mod time and path - obeys includes and excludes 1014 // 1015 // Lists in parallel which may get them out of order 1016 func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error { 1017 return ListFn(ctx, f, func(o fs.Object) { 1018 tr := accounting.Stats(ctx).NewCheckingTransfer(o) 1019 defer func() { 1020 tr.Done(nil) 1021 }() 1022 modTime := o.ModTime(ctx) 1023 syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) 1024 }) 1025 } 1026 1027 // Md5sum list the Fs to the supplied writer 1028 // 1029 // Produces the same output as the md5sum command - obeys includes and 1030 // excludes 1031 // 1032 // Lists in parallel which may get them out of order 1033 func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error { 1034 return HashLister(ctx, hash.MD5, f, w) 1035 } 1036 1037 // Sha1sum list the Fs to the supplied writer 1038 // 1039 // Obeys includes and excludes 1040 // 1041 // Lists in parallel which may get them out of order 1042 func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error { 1043 return HashLister(ctx, hash.SHA1, f, w) 1044 } 1045 1046 // hashSum returns the human readable hash for ht passed in. This may 1047 // be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will 1048 // return an error. 
1049 func hashSum(ctx context.Context, ht hash.Type, o fs.Object) (string, error) { 1050 var err error 1051 tr := accounting.Stats(ctx).NewCheckingTransfer(o) 1052 defer func() { 1053 tr.Done(err) 1054 }() 1055 sum, err := o.Hash(ctx, ht) 1056 if err == hash.ErrUnsupported { 1057 sum = "UNSUPPORTED" 1058 } else if err != nil { 1059 fs.Debugf(o, "Failed to read %v: %v", ht, err) 1060 sum = "ERROR" 1061 } 1062 return sum, err 1063 } 1064 1065 // HashLister does a md5sum equivalent for the hash type passed in 1066 func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error { 1067 return ListFn(ctx, f, func(o fs.Object) { 1068 sum, _ := hashSum(ctx, ht, o) 1069 syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote()) 1070 }) 1071 } 1072 1073 // HashListerBase64 does a md5sum equivalent for the hash type passed in with base64 encoded 1074 func HashListerBase64(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error { 1075 return ListFn(ctx, f, func(o fs.Object) { 1076 sum, err := hashSum(ctx, ht, o) 1077 if err == nil { 1078 hexBytes, _ := hex.DecodeString(sum) 1079 sum = base64.URLEncoding.EncodeToString(hexBytes) 1080 } 1081 width := base64.URLEncoding.EncodedLen(hash.Width(ht) / 2) 1082 syncFprintf(w, "%*s %s\n", width, sum, o.Remote()) 1083 }) 1084 } 1085 1086 // Count counts the objects and their sizes in the Fs 1087 // 1088 // Obeys includes and excludes 1089 func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) { 1090 err = ListFn(ctx, f, func(o fs.Object) { 1091 atomic.AddInt64(&objects, 1) 1092 objectSize := o.Size() 1093 if objectSize > 0 { 1094 atomic.AddInt64(&size, objectSize) 1095 } 1096 }) 1097 return 1098 } 1099 1100 // ConfigMaxDepth returns the depth to use for a recursive or non recursive listing. 
1101 func ConfigMaxDepth(recursive bool) int { 1102 depth := fs.Config.MaxDepth 1103 if !recursive && depth < 0 { 1104 depth = 1 1105 } 1106 return depth 1107 } 1108 1109 // ListDir lists the directories/buckets/containers in the Fs to the supplied writer 1110 func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error { 1111 return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error { 1112 entries.ForDir(func(dir fs.Directory) { 1113 if dir != nil { 1114 syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) 1115 } 1116 }) 1117 return nil 1118 }) 1119 } 1120 1121 // Mkdir makes a destination directory or container 1122 func Mkdir(ctx context.Context, f fs.Fs, dir string) error { 1123 if fs.Config.DryRun { 1124 fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set") 1125 return nil 1126 } 1127 fs.Debugf(fs.LogDirName(f, dir), "Making directory") 1128 err := f.Mkdir(ctx, dir) 1129 if err != nil { 1130 err = fs.CountError(err) 1131 return err 1132 } 1133 return nil 1134 } 1135 1136 // TryRmdir removes a container but not if not empty. It doesn't 1137 // count errors but may return one. 
1138 func TryRmdir(ctx context.Context, f fs.Fs, dir string) error { 1139 if fs.Config.DryRun { 1140 fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set") 1141 return nil 1142 } 1143 fs.Debugf(fs.LogDirName(f, dir), "Removing directory") 1144 return f.Rmdir(ctx, dir) 1145 } 1146 1147 // Rmdir removes a container but not if not empty 1148 func Rmdir(ctx context.Context, f fs.Fs, dir string) error { 1149 err := TryRmdir(ctx, f, dir) 1150 if err != nil { 1151 err = fs.CountError(err) 1152 return err 1153 } 1154 return err 1155 } 1156 1157 // Purge removes a directory and all of its contents 1158 func Purge(ctx context.Context, f fs.Fs, dir string) error { 1159 doFallbackPurge := true 1160 var err error 1161 if dir == "" { 1162 // FIXME change the Purge interface so it takes a dir - see #1891 1163 if doPurge := f.Features().Purge; doPurge != nil { 1164 doFallbackPurge = false 1165 if fs.Config.DryRun { 1166 fs.Logf(f, "Not purging as --dry-run set") 1167 } else { 1168 err = doPurge(ctx) 1169 if err == fs.ErrorCantPurge { 1170 doFallbackPurge = true 1171 } 1172 } 1173 } 1174 } 1175 if doFallbackPurge { 1176 // DeleteFiles and Rmdir observe --dry-run 1177 err = DeleteFiles(ctx, listToChan(ctx, f, dir)) 1178 if err != nil { 1179 return err 1180 } 1181 err = Rmdirs(ctx, f, dir, false) 1182 } 1183 if err != nil { 1184 err = fs.CountError(err) 1185 return err 1186 } 1187 return nil 1188 } 1189 1190 // Delete removes all the contents of a container. Unlike Purge, it 1191 // obeys includes and excludes. 
1192 func Delete(ctx context.Context, f fs.Fs) error { 1193 delChan := make(fs.ObjectsChan, fs.Config.Transfers) 1194 delErr := make(chan error, 1) 1195 go func() { 1196 delErr <- DeleteFiles(ctx, delChan) 1197 }() 1198 err := ListFn(ctx, f, func(o fs.Object) { 1199 delChan <- o 1200 }) 1201 close(delChan) 1202 delError := <-delErr 1203 if err == nil { 1204 err = delError 1205 } 1206 return err 1207 } 1208 1209 // listToChan will transfer all objects in the listing to the output 1210 // 1211 // If an error occurs, the error will be logged, and it will close the 1212 // channel. 1213 // 1214 // If the error was ErrorDirNotFound then it will be ignored 1215 func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan { 1216 o := make(fs.ObjectsChan, fs.Config.Checkers) 1217 go func() { 1218 defer close(o) 1219 err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { 1220 entries.ForObject(func(obj fs.Object) { 1221 o <- obj 1222 }) 1223 return nil 1224 }) 1225 if err != nil && err != fs.ErrorDirNotFound { 1226 err = errors.Wrap(err, "failed to list") 1227 err = fs.CountError(err) 1228 fs.Errorf(nil, "%v", err) 1229 } 1230 }() 1231 return o 1232 } 1233 1234 // CleanUp removes the trash for the Fs 1235 func CleanUp(ctx context.Context, f fs.Fs) error { 1236 doCleanUp := f.Features().CleanUp 1237 if doCleanUp == nil { 1238 return errors.Errorf("%v doesn't support cleanup", f) 1239 } 1240 if fs.Config.DryRun { 1241 fs.Logf(f, "Not running cleanup as --dry-run set") 1242 return nil 1243 } 1244 return doCleanUp(ctx) 1245 } 1246 1247 // wrap a Reader and a Closer together into a ReadCloser 1248 type readCloser struct { 1249 io.Reader 1250 io.Closer 1251 } 1252 1253 // Cat any files to the io.Writer 1254 // 1255 // if offset == 0 it will be ignored 1256 // if offset > 0 then the file will be seeked to that offset 1257 // if offset < 0 then the file will be seeked that far from the end 1258 // 1259 // if count 
< 0 then it will be ignored 1260 // if count >= 0 then only that many characters will be output 1261 func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error { 1262 var mu sync.Mutex 1263 return ListFn(ctx, f, func(o fs.Object) { 1264 var err error 1265 tr := accounting.Stats(ctx).NewTransfer(o) 1266 defer func() { 1267 tr.Done(err) 1268 }() 1269 opt := fs.RangeOption{Start: offset, End: -1} 1270 size := o.Size() 1271 if opt.Start < 0 { 1272 opt.Start += size 1273 } 1274 if count >= 0 { 1275 opt.End = opt.Start + count - 1 1276 } 1277 var options []fs.OpenOption 1278 if opt.Start > 0 || opt.End >= 0 { 1279 options = append(options, &opt) 1280 } 1281 in, err := o.Open(ctx, options...) 1282 if err != nil { 1283 err = fs.CountError(err) 1284 fs.Errorf(o, "Failed to open: %v", err) 1285 return 1286 } 1287 if count >= 0 { 1288 in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in} 1289 } 1290 in = tr.Account(in).WithBuffer() // account and buffer the transfer 1291 // take the lock just before we output stuff, so at the last possible moment 1292 mu.Lock() 1293 defer mu.Unlock() 1294 _, err = io.Copy(w, in) 1295 if err != nil { 1296 err = fs.CountError(err) 1297 fs.Errorf(o, "Failed to send to output: %v", err) 1298 } 1299 }) 1300 } 1301 1302 // Rcat reads data from the Reader until EOF and uploads it to a file on remote 1303 func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { 1304 tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1) 1305 defer func() { 1306 tr.Done(err) 1307 }() 1308 in = tr.Account(in).WithBuffer() 1309 1310 hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash 1311 hashOption := &fs.HashesOption{Hashes: hashes} 1312 hash, err := hash.NewMultiHasherTypes(hashes) 1313 if err != nil { 1314 return nil, err 1315 } 1316 readCounter := readers.NewCountingReader(in) 1317 trackingIn := io.TeeReader(readCounter, 
hash) 1318 1319 compare := func(dst fs.Object) error { 1320 src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst) 1321 if !Equal(ctx, src, dst) { 1322 err = errors.Errorf("corrupted on transfer") 1323 err = fs.CountError(err) 1324 fs.Errorf(dst, "%v", err) 1325 return err 1326 } 1327 return nil 1328 } 1329 1330 // check if file small enough for direct upload 1331 buf := make([]byte, fs.Config.StreamingUploadCutoff) 1332 if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF { 1333 fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n) 1334 src := object.NewMemoryObject(dstFileName, modTime, buf[:n]) 1335 return Copy(ctx, fdst, nil, dstFileName, src) 1336 } 1337 1338 // Make a new ReadCloser with the bits we've already read 1339 in = &readCloser{ 1340 Reader: io.MultiReader(bytes.NewReader(buf), trackingIn), 1341 Closer: in, 1342 } 1343 1344 fStreamTo := fdst 1345 canStream := fdst.Features().PutStream != nil 1346 if !canStream { 1347 fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file") 1348 tmpLocalFs, err := fs.TemporaryLocalFs() 1349 if err != nil { 1350 return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file") 1351 } 1352 defer func() { 1353 err := Purge(ctx, tmpLocalFs, "") 1354 if err != nil { 1355 fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err) 1356 } 1357 }() 1358 fStreamTo = tmpLocalFs 1359 } 1360 1361 if fs.Config.DryRun { 1362 fs.Logf("stdin", "Not uploading as --dry-run") 1363 // prevents "broken pipe" errors 1364 _, err = io.Copy(ioutil.Discard, in) 1365 return nil, err 1366 } 1367 1368 objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil) 1369 if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, hashOption); err != nil { 1370 return dst, err 1371 } 1372 if err = compare(dst); err != nil { 1373 
return dst, err 1374 } 1375 if !canStream { 1376 // copy dst (which is the local object we have just streamed to) to the remote 1377 return Copy(ctx, fdst, nil, dstFileName, dst) 1378 } 1379 return dst, nil 1380 } 1381 1382 // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 1383 func PublicLink(ctx context.Context, f fs.Fs, remote string) (string, error) { 1384 doPublicLink := f.Features().PublicLink 1385 if doPublicLink == nil { 1386 return "", errors.Errorf("%v doesn't support public links", f) 1387 } 1388 return doPublicLink(ctx, remote) 1389 } 1390 1391 // Rmdirs removes any empty directories (or directories only 1392 // containing empty directories) under f, including f. 1393 func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { 1394 dirEmpty := make(map[string]bool) 1395 dirEmpty[dir] = !leaveRoot 1396 err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { 1397 if err != nil { 1398 err = fs.CountError(err) 1399 fs.Errorf(f, "Failed to list %q: %v", dirPath, err) 1400 return nil 1401 } 1402 for _, entry := range entries { 1403 switch x := entry.(type) { 1404 case fs.Directory: 1405 // add a new directory as empty 1406 dir := x.Remote() 1407 _, found := dirEmpty[dir] 1408 if !found { 1409 dirEmpty[dir] = true 1410 } 1411 case fs.Object: 1412 // mark the parents of the file as being non-empty 1413 dir := x.Remote() 1414 for dir != "" { 1415 dir = path.Dir(dir) 1416 if dir == "." 
|| dir == "/" { 1417 dir = "" 1418 } 1419 empty, found := dirEmpty[dir] 1420 // End if we reach a directory which is non-empty 1421 if found && !empty { 1422 break 1423 } 1424 dirEmpty[dir] = false 1425 } 1426 } 1427 } 1428 return nil 1429 }) 1430 if err != nil { 1431 return errors.Wrap(err, "failed to rmdirs") 1432 } 1433 // Now delete the empty directories, starting from the longest path 1434 var toDelete []string 1435 for dir, empty := range dirEmpty { 1436 if empty { 1437 toDelete = append(toDelete, dir) 1438 } 1439 } 1440 sort.Strings(toDelete) 1441 for i := len(toDelete) - 1; i >= 0; i-- { 1442 dir := toDelete[i] 1443 err := TryRmdir(ctx, f, dir) 1444 if err != nil { 1445 err = fs.CountError(err) 1446 fs.Errorf(dir, "Failed to rmdir: %v", err) 1447 return err 1448 } 1449 } 1450 return nil 1451 } 1452 1453 // GetCompareDest sets up --compare-dest 1454 func GetCompareDest() (CompareDest fs.Fs, err error) { 1455 CompareDest, err = cache.Get(fs.Config.CompareDest) 1456 if err != nil { 1457 return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err)) 1458 } 1459 return CompareDest, nil 1460 } 1461 1462 // compareDest checks --compare-dest to see if src needs to 1463 // be copied 1464 // 1465 // Returns True if src is in --compare-dest 1466 func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) { 1467 var remote string 1468 if dst == nil { 1469 remote = src.Remote() 1470 } else { 1471 remote = dst.Remote() 1472 } 1473 CompareDestFile, err := CompareDest.NewObject(ctx, remote) 1474 switch err { 1475 case fs.ErrorObjectNotFound: 1476 return false, nil 1477 case nil: 1478 break 1479 default: 1480 return false, err 1481 } 1482 if Equal(ctx, src, CompareDestFile) { 1483 fs.Debugf(src, "Destination found in --compare-dest, skipping") 1484 return true, nil 1485 } 1486 return false, nil 1487 } 1488 1489 // GetCopyDest sets up --copy-dest 1490 func 
func GetCopyDest(fdst fs.Fs) (CopyDest fs.Fs, err error) {
	CopyDest, err = cache.Get(fs.Config.CopyDest)
	if err != nil {
		return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
	}
	// --copy-dest must live on the same remote as the destination so
	// that server side copy can be used
	if !SameConfig(fdst, CopyDest) {
		return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
	}
	if CopyDest.Features().Copy == nil {
		return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server side copy"))
	}
	return CopyDest, nil
}

// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	// Compare against the destination name if it exists, otherwise the source name
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		// not present in --copy-dest so needs a normal transfer
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	// Compare without updating the mod time - the candidate is not the
	// real destination so its timestamp must not be touched
	opt := defaultEqualOpt()
	opt.updateModTime = false
	if equal(ctx, src, CopyDestFile, opt) {
		if dst == nil || !Equal(ctx, src, dst) {
			if dst != nil && backupDir != nil {
				err = MoveBackupDir(ctx, backupDir, dst)
				if err != nil {
					return false, errors.Wrap(err, "moving to --backup-dir failed")
				}
				// If successful zero out the dstObj as it is no longer there
				dst = nil
			}
			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
			if err != nil {
				// copy error is deliberately swallowed so the file
				// falls through to a normal transfer
				fs.Errorf(src, "Destination found in --copy-dest, error copying")
				return false, nil
			}
			fs.Debugf(src, "Destination found in --copy-dest, using server side copy")
			return true, nil
		}
		fs.Debugf(src, "Unchanged skipping")
		return true, nil
	}
	fs.Debugf(src, "Destination not found in --copy-dest")
	return false, nil
}

// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
//
// Note --compare-dest takes precedence over --copy-dest if both are set
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	if fs.Config.CompareDest != "" {
		return compareDest(ctx, dst, src, CompareOrCopyDest)
	} else if fs.Config.CopyDest != "" {
		return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
	}
	return false, nil
}

// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		return true
	}
	// If we should ignore existing files, don't transfer
	if fs.Config.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// If we should upload unconditionally
	if fs.Config.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if fs.Config.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			// dst is newer than src by more than the window - keep it
			fs.Debugf(src, "Destination is newer than source, skipping")
			return false
		case dt <= -modifyWindow:
			// dst is older than src by more than the window
			// force --checksum on for the check and do update modtimes by default
			opt := defaultEqualOpt()
			opt.forceModTimeMatch = true
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Unchanged skipping")
				return false
			}
		default:
			// mod times within the window of each other
			// Do a size only compare unless --checksum is set
			opt := defaultEqualOpt()
			opt.sizeOnly = !fs.Config.CheckSum
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(ctx, src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}

// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
1630 // Pass in size >=0 if known, <0 if not known 1631 func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) { 1632 var obj fs.Object 1633 1634 if size >= 0 { 1635 var err error 1636 // Size known use Put 1637 tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size) 1638 defer func() { 1639 tr.Done(err) 1640 }() 1641 body := ioutil.NopCloser(in) // we let the server close the body 1642 in := tr.Account(body) // account the transfer (no buffering) 1643 1644 if fs.Config.DryRun { 1645 fs.Logf("stdin", "Not uploading as --dry-run") 1646 // prevents "broken pipe" errors 1647 _, err = io.Copy(ioutil.Discard, in) 1648 return nil, err 1649 } 1650 1651 info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst) 1652 obj, err = fdst.Put(ctx, in, info) 1653 if err != nil { 1654 fs.Errorf(dstFileName, "Post request put error: %v", err) 1655 1656 return nil, err 1657 } 1658 } else { 1659 // Size unknown use Rcat 1660 obj, err = Rcat(ctx, fdst, dstFileName, in, modTime) 1661 if err != nil { 1662 fs.Errorf(dstFileName, "Post request rcat error: %v", err) 1663 1664 return nil, err 1665 } 1666 } 1667 1668 return obj, nil 1669 } 1670 1671 // copyURLFunc is called from CopyURLFn 1672 type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) 1673 1674 // copyURLFn copies the data from the url to the function supplied 1675 func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) { 1676 client := fshttp.NewClient(fs.Config) 1677 resp, err := client.Get(url) 1678 if err != nil { 1679 return err 1680 } 1681 defer fs.CheckClose(resp.Body, &err) 1682 if resp.StatusCode < 200 || resp.StatusCode >= 300 { 1683 return errors.Errorf("CopyURL failed: %s", resp.Status) 1684 } 1685 modTime, err := http.ParseTime(resp.Header.Get("Last-Modified")) 1686 
if err != nil { 1687 modTime = time.Now() 1688 } 1689 if dstFileNameFromURL { 1690 dstFileName = path.Base(resp.Request.URL.Path) 1691 if dstFileName == "." || dstFileName == "/" { 1692 return errors.Errorf("CopyURL failed: file name wasn't found in url") 1693 } 1694 } 1695 return fn(ctx, dstFileName, resp.Body, resp.ContentLength, modTime) 1696 } 1697 1698 // CopyURL copies the data from the url to (fdst, dstFileName) 1699 func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool) (dst fs.Object, err error) { 1700 err = copyURLFn(ctx, dstFileName, url, dstFileNameFromURL, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) { 1701 dst, err = RcatSize(ctx, fdst, dstFileName, in, size, modTime) 1702 return err 1703 }) 1704 return dst, err 1705 } 1706 1707 // CopyURLToWriter copies the data from the url to the io.Writer supplied 1708 func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error) { 1709 return copyURLFn(ctx, "", url, false, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) { 1710 _, err = io.Copy(out, in) 1711 return err 1712 }) 1713 } 1714 1715 // BackupDir returns the correctly configured --backup-dir 1716 func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) { 1717 if fs.Config.BackupDir != "" { 1718 backupDir, err = cache.Get(fs.Config.BackupDir) 1719 if err != nil { 1720 return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err)) 1721 } 1722 if !SameConfig(fdst, backupDir) { 1723 return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination")) 1724 } 1725 if srcFileName == "" { 1726 if Overlapping(fdst, backupDir) { 1727 return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap")) 1728 } 1729 
if Overlapping(fsrc, backupDir) { 1730 return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap")) 1731 } 1732 } else { 1733 if fs.Config.Suffix == "" { 1734 if SameDir(fdst, backupDir) { 1735 return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same")) 1736 } 1737 if SameDir(fsrc, backupDir) { 1738 return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same")) 1739 } 1740 } 1741 } 1742 } else { 1743 if srcFileName == "" { 1744 return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir")) 1745 } 1746 // --backup-dir is not set but --suffix is - use the destination as the backupDir 1747 backupDir = fdst 1748 } 1749 if !CanServerSideMove(backupDir) { 1750 return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy")) 1751 } 1752 return backupDir, nil 1753 } 1754 1755 // MoveBackupDir moves a file to the backup dir 1756 func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) { 1757 remoteWithSuffix := SuffixName(dst.Remote()) 1758 overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix) 1759 _, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst) 1760 return err 1761 } 1762 1763 // moveOrCopyFile moves or copies a single file possibly to a new name 1764 func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { 1765 dstFilePath := path.Join(fdst.Root(), dstFileName) 1766 srcFilePath := path.Join(fsrc.Root(), srcFileName) 1767 if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { 1768 fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName) 1769 return nil 1770 } 1771 1772 // Choose operations 1773 Op := Move 1774 if cp { 1775 Op = Copy 1776 } 1777 1778 // Find src object 1779 srcObj, 
err := fsrc.NewObject(ctx, srcFileName) 1780 if err != nil { 1781 return err 1782 } 1783 1784 // Find dst object if it exists 1785 var dstObj fs.Object 1786 if !fs.Config.NoCheckDest { 1787 dstObj, err = fdst.NewObject(ctx, dstFileName) 1788 if err == fs.ErrorObjectNotFound { 1789 dstObj = nil 1790 } else if err != nil { 1791 return err 1792 } 1793 } 1794 1795 // Special case for changing case of a file on a case insensitive remote 1796 // This will move the file to a temporary name then 1797 // move it back to the intended destination. This is required 1798 // to avoid issues with certain remotes and avoid file deletion. 1799 if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) { 1800 // Create random name to temporarily move file to 1801 tmpObjName := dstFileName + "-rclone-move-" + random.String(8) 1802 _, err := fdst.NewObject(ctx, tmpObjName) 1803 if err != fs.ErrorObjectNotFound { 1804 if err == nil { 1805 return errors.New("found an already existing file with a randomly generated name. 
Try the operation again") 1806 } 1807 return errors.Wrap(err, "error while attempting to move file to a temporary location") 1808 } 1809 tr := accounting.Stats(ctx).NewTransfer(srcObj) 1810 defer func() { 1811 tr.Done(err) 1812 }() 1813 tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj) 1814 if err != nil { 1815 return errors.Wrap(err, "error while moving file to temporary location") 1816 } 1817 _, err = Op(ctx, fdst, nil, dstFileName, tmpObj) 1818 return err 1819 } 1820 1821 var backupDir, copyDestDir fs.Fs 1822 if fs.Config.BackupDir != "" || fs.Config.Suffix != "" { 1823 backupDir, err = BackupDir(fdst, fsrc, srcFileName) 1824 if err != nil { 1825 return errors.Wrap(err, "creating Fs for --backup-dir failed") 1826 } 1827 } 1828 if fs.Config.CompareDest != "" { 1829 copyDestDir, err = GetCompareDest() 1830 if err != nil { 1831 return err 1832 } 1833 } else if fs.Config.CopyDest != "" { 1834 copyDestDir, err = GetCopyDest(fdst) 1835 if err != nil { 1836 return err 1837 } 1838 } 1839 NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir) 1840 if err != nil { 1841 return err 1842 } 1843 if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) { 1844 // If destination already exists, then we must move it into --backup-dir if required 1845 if dstObj != nil && backupDir != nil { 1846 err = MoveBackupDir(ctx, backupDir, dstObj) 1847 if err != nil { 1848 return errors.Wrap(err, "moving to --backup-dir failed") 1849 } 1850 // If successful zero out the dstObj as it is no longer there 1851 dstObj = nil 1852 } 1853 1854 _, err = Op(ctx, fdst, dstObj, dstFileName, srcObj) 1855 } else { 1856 tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj) 1857 if !cp { 1858 err = DeleteFile(ctx, srcObj) 1859 } 1860 tr.Done(err) 1861 } 1862 return err 1863 } 1864 1865 // MoveFile moves a single file possibly to a new name 1866 func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { 1867 
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false) 1868 } 1869 1870 // CopyFile moves a single file possibly to a new name 1871 func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { 1872 return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true) 1873 } 1874 1875 // SetTier changes tier of object in remote 1876 func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error { 1877 return ListFn(ctx, fsrc, func(o fs.Object) { 1878 objImpl, ok := o.(fs.SetTierer) 1879 if !ok { 1880 fs.Errorf(fsrc, "Remote object does not implement SetTier") 1881 return 1882 } 1883 err := objImpl.SetTier(tier) 1884 if err != nil { 1885 fs.Errorf(fsrc, "Failed to do SetTier, %v", err) 1886 } 1887 }) 1888 } 1889 1890 // ListFormat defines files information print format 1891 type ListFormat struct { 1892 separator string 1893 dirSlash bool 1894 absolute bool 1895 output []func(entry *ListJSONItem) string 1896 csv *csv.Writer 1897 buf bytes.Buffer 1898 } 1899 1900 // SetSeparator changes separator in struct 1901 func (l *ListFormat) SetSeparator(separator string) { 1902 l.separator = separator 1903 } 1904 1905 // SetDirSlash defines if slash should be printed 1906 func (l *ListFormat) SetDirSlash(dirSlash bool) { 1907 l.dirSlash = dirSlash 1908 } 1909 1910 // SetAbsolute prints a leading slash in front of path names 1911 func (l *ListFormat) SetAbsolute(absolute bool) { 1912 l.absolute = absolute 1913 } 1914 1915 // SetCSV defines if the output should be csv 1916 // 1917 // Note that you should call SetSeparator before this if you want a 1918 // custom separator 1919 func (l *ListFormat) SetCSV(useCSV bool) { 1920 if useCSV { 1921 l.csv = csv.NewWriter(&l.buf) 1922 if l.separator != "" { 1923 l.csv.Comma = []rune(l.separator)[0] 1924 } 1925 } else { 1926 l.csv = nil 1927 } 1928 } 1929 1930 // SetOutput sets functions used to create files information 1931 func (l *ListFormat) SetOutput(output 
[]func(entry *ListJSONItem) string) { 1932 l.output = output 1933 } 1934 1935 // AddModTime adds file's Mod Time to output 1936 func (l *ListFormat) AddModTime() { 1937 l.AppendOutput(func(entry *ListJSONItem) string { 1938 return entry.ModTime.When.Local().Format("2006-01-02 15:04:05") 1939 }) 1940 } 1941 1942 // AddSize adds file's size to output 1943 func (l *ListFormat) AddSize() { 1944 l.AppendOutput(func(entry *ListJSONItem) string { 1945 return strconv.FormatInt(entry.Size, 10) 1946 }) 1947 } 1948 1949 // normalisePath makes sure the path has the correct slashes for the current mode 1950 func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string { 1951 if l.absolute && !strings.HasPrefix(remote, "/") { 1952 remote = "/" + remote 1953 } 1954 if entry.IsDir && l.dirSlash { 1955 remote += "/" 1956 } 1957 return remote 1958 } 1959 1960 // AddPath adds path to file to output 1961 func (l *ListFormat) AddPath() { 1962 l.AppendOutput(func(entry *ListJSONItem) string { 1963 return l.normalisePath(entry, entry.Path) 1964 }) 1965 } 1966 1967 // AddEncrypted adds the encrypted path to file to output 1968 func (l *ListFormat) AddEncrypted() { 1969 l.AppendOutput(func(entry *ListJSONItem) string { 1970 return l.normalisePath(entry, entry.Encrypted) 1971 }) 1972 } 1973 1974 // AddHash adds the hash of the type given to the output 1975 func (l *ListFormat) AddHash(ht hash.Type) { 1976 hashName := ht.String() 1977 l.AppendOutput(func(entry *ListJSONItem) string { 1978 if entry.IsDir { 1979 return "" 1980 } 1981 return entry.Hashes[hashName] 1982 }) 1983 } 1984 1985 // AddID adds file's ID to the output if known 1986 func (l *ListFormat) AddID() { 1987 l.AppendOutput(func(entry *ListJSONItem) string { 1988 return entry.ID 1989 }) 1990 } 1991 1992 // AddOrigID adds file's Original ID to the output if known 1993 func (l *ListFormat) AddOrigID() { 1994 l.AppendOutput(func(entry *ListJSONItem) string { 1995 return entry.OrigID 1996 }) 1997 } 1998 1999 // 
AddTier adds file's Tier to the output if known 2000 func (l *ListFormat) AddTier() { 2001 l.AppendOutput(func(entry *ListJSONItem) string { 2002 return entry.Tier 2003 }) 2004 } 2005 2006 // AddMimeType adds file's MimeType to the output if known 2007 func (l *ListFormat) AddMimeType() { 2008 l.AppendOutput(func(entry *ListJSONItem) string { 2009 return entry.MimeType 2010 }) 2011 } 2012 2013 // AppendOutput adds string generated by specific function to printed output 2014 func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) { 2015 l.output = append(l.output, functionToAppend) 2016 } 2017 2018 // Format prints information about the DirEntry in the format defined 2019 func (l *ListFormat) Format(entry *ListJSONItem) (result string) { 2020 var out []string 2021 for _, fun := range l.output { 2022 out = append(out, fun(entry)) 2023 } 2024 if l.csv != nil { 2025 l.buf.Reset() 2026 _ = l.csv.Write(out) // can't fail writing to bytes.Buffer 2027 l.csv.Flush() 2028 result = strings.TrimRight(l.buf.String(), "\n") 2029 } else { 2030 result = strings.Join(out, l.separator) 2031 } 2032 return result 2033 } 2034 2035 // DirMove renames srcRemote to dstRemote 2036 // 2037 // It does this by loading the directory tree into memory (using ListR 2038 // if available) and doing renames in parallel. 
2039 func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) { 2040 // Use DirMove if possible 2041 if doDirMove := f.Features().DirMove; doDirMove != nil { 2042 return doDirMove(ctx, f, srcRemote, dstRemote) 2043 } 2044 2045 // Load the directory tree into memory 2046 tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1) 2047 if err != nil { 2048 return errors.Wrap(err, "RenameDir tree walk") 2049 } 2050 2051 // Get the directories in sorted order 2052 dirs := tree.Dirs() 2053 2054 // Make the destination directories - must be done in order not in parallel 2055 for _, dir := range dirs { 2056 dstPath := dstRemote + dir[len(srcRemote):] 2057 err := f.Mkdir(ctx, dstPath) 2058 if err != nil { 2059 return errors.Wrap(err, "RenameDir mkdir") 2060 } 2061 } 2062 2063 // Rename the files in parallel 2064 type rename struct { 2065 o fs.Object 2066 newPath string 2067 } 2068 renames := make(chan rename, fs.Config.Transfers) 2069 g, gCtx := errgroup.WithContext(context.Background()) 2070 for i := 0; i < fs.Config.Transfers; i++ { 2071 g.Go(func() error { 2072 for job := range renames { 2073 dstOverwritten, _ := f.NewObject(gCtx, job.newPath) 2074 _, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o) 2075 if err != nil { 2076 return err 2077 } 2078 select { 2079 case <-gCtx.Done(): 2080 return gCtx.Err() 2081 default: 2082 } 2083 2084 } 2085 return nil 2086 }) 2087 } 2088 for dir, entries := range tree { 2089 dstPath := dstRemote + dir[len(srcRemote):] 2090 for _, entry := range entries { 2091 if o, ok := entry.(fs.Object); ok { 2092 renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))} 2093 } 2094 } 2095 } 2096 close(renames) 2097 err = g.Wait() 2098 if err != nil { 2099 return errors.Wrap(err, "RenameDir renames") 2100 } 2101 2102 // Remove the source directories in reverse order 2103 for i := len(dirs) - 1; i >= 0; i-- { 2104 err := f.Rmdir(ctx, dirs[i]) 2105 if err != nil { 2106 return errors.Wrap(err, "RenameDir rmdir") 
2107 } 2108 } 2109 2110 return nil 2111 } 2112 2113 // FsInfo provides information about a remote 2114 type FsInfo struct { 2115 // Name of the remote (as passed into NewFs) 2116 Name string 2117 2118 // Root of the remote (as passed into NewFs) 2119 Root string 2120 2121 // String returns a description of the FS 2122 String string 2123 2124 // Precision of the ModTimes in this Fs in Nanoseconds 2125 Precision time.Duration 2126 2127 // Returns the supported hash types of the filesystem 2128 Hashes []string 2129 2130 // Features returns the optional features of this Fs 2131 Features map[string]bool 2132 } 2133 2134 // GetFsInfo gets the information (FsInfo) about a given Fs 2135 func GetFsInfo(f fs.Fs) *FsInfo { 2136 info := &FsInfo{ 2137 Name: f.Name(), 2138 Root: f.Root(), 2139 String: f.String(), 2140 Precision: f.Precision(), 2141 Hashes: make([]string, 0, 4), 2142 Features: f.Features().Enabled(), 2143 } 2144 for _, hashType := range f.Hashes().Array() { 2145 info.Hashes = append(info.Hashes, hashType.String()) 2146 } 2147 return info 2148 }