github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/fs/sync/sync.go

// Package sync is the implementation of sync/copy/move
package sync

import (
	"context"
	"errors"
	"fmt"
	"path"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/march"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/errcount"
	"golang.org/x/sync/errgroup"
)

// ErrorMaxDurationReached defines error when transfer duration is reached
// Used for checking on exit and matching to correct exit code.
var ErrorMaxDurationReached = errors.New("max transfer duration reached as set by --max-duration")

// ErrorMaxDurationReachedFatal is returned from when the max
// duration limit is reached.
var ErrorMaxDurationReachedFatal = fserrors.FatalError(ErrorMaxDurationReached)

type syncCopyMove struct {
	// parameters
	fdst               fs.Fs
	fsrc               fs.Fs
	deleteMode         fs.DeleteMode // how we are doing deletions
	DoMove             bool
	copyEmptySrcDirs   bool
	deleteEmptySrcDirs bool
	dir                string
	// internal state
	ci                     *fs.ConfigInfo         // global config
	fi                     *filter.Filter         // filter config
	ctx                    context.Context        // internal context for controlling go-routines
	cancel                 func()                 // cancel the context
	inCtx                  context.Context        // internal context for controlling march
	inCancel               func()                 // cancel the march context
	noTraverse             bool                   // if set don't traverse the dst
	noCheckDest            bool                   // if set transfer all objects regardless without checking dst
	noUnicodeNormalization bool                   // don't normalize unicode characters in filenames
	deletersWg             sync.WaitGroup         // for delete before go routine
	deleteFilesCh          chan fs.Object         // channel to receive deletes if delete before
	trackRenames           bool                   // set if we should do server-side renames
	trackRenamesStrategy   trackRenamesStrategy   // strategies used for tracking renames
	dstFilesMu             sync.Mutex             // protect dstFiles
	dstFiles               map[string]fs.Object   // dst files, always filled
	srcFiles               map[string]fs.Object   // src files, only used if deleteBefore
	srcFilesChan           chan fs.Object         // passes src objects
	srcFilesResult         chan error             // error result of src listing
	dstFilesResult         chan error             // error result of dst listing
	dstEmptyDirsMu         sync.Mutex             // protect dstEmptyDirs
	dstEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	srcEmptyDirsMu         sync.Mutex             // protect srcEmptyDirs
	srcEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	srcMoveEmptyDirs       map[string]fs.DirEntry // potentially empty directories when moving files out of them
	checkerWg              sync.WaitGroup         // wait for checkers
	toBeChecked            *pipe                  // checkers channel
	transfersWg            sync.WaitGroup         // wait for transfers
	toBeUploaded           *pipe                  // copiers channel
	errorMu                sync.Mutex             // Mutex covering the errors variables
	err                    error                  // normal error from copy process
	noRetryErr             error                  // error with NoRetry set
	fatalErr               error                  // fatal error
	commonHash             hash.Type              // common hash type between src and dst
	modifyWindow           time.Duration          // modify window between fsrc, fdst
	renameMapMu            sync.Mutex             // mutex to protect the below
	renameMap              map[string][]fs.Object // dst files by hash - only used by trackRenames
	renamerWg              sync.WaitGroup         // wait for renamers
	toBeRenamed            *pipe                  // renamers channel
	trackRenamesWg         sync.WaitGroup         // wg for background track renames
	trackRenamesCh         chan fs.Object         // objects are pumped in here
	renameCheck            []fs.Object            // accumulate files to check for rename here
	compareCopyDest        []fs.Fs                // place to check for files to server side copy
	backupDir              fs.Fs                  // place to store overwrites/deletes
	checkFirst             bool                   // if set run all the checkers before starting transfers
	maxDurationEndTime     time.Time              // end time if --max-duration is set
	logger                 operations.LoggerFn    // LoggerFn used to report the results of a sync (or bisync) to an io.Writer
	usingLogger            bool                   // whether we are using logger
	setDirMetadata         bool                   // if set we set the directory metadata
	setDirModTime          bool                   // if set we set the directory modtimes
	setDirModTimeAfter     bool                   // if set we set the directory modtimes at the end of the sync
	setDirModTimeMu        sync.Mutex             // protect setDirModTimes and modifiedDirs
	setDirModTimes         []setDirModTime        // directories that need their modtime set
	setDirModTimesMaxLevel int                    // max level of the directories to set
	modifiedDirs           map[string]struct{}    // dirs with changed contents (if s.setDirModTimeAfter)
}

// For keeping track of delayed modtime sets
type setDirModTime struct {
	src     fs.Directory // if set the metadata should be set too
	dst     fs.Directory
	dir     string
	modTime time.Time
	level   int // the level of the directory, 0 is root
}

type trackRenamesStrategy byte

const (
	trackRenamesStrategyHash trackRenamesStrategy = 1 << iota
	trackRenamesStrategyModtime
	trackRenamesStrategyLeaf
)

func (strategy trackRenamesStrategy) hash() bool {
	return (strategy & trackRenamesStrategyHash) != 0
}

func (strategy trackRenamesStrategy) modTime() bool {
	return (strategy & trackRenamesStrategyModtime) != 0
}

func (strategy trackRenamesStrategy) leaf() bool {
	return (strategy & trackRenamesStrategyLeaf) != 0
}

func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
	if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) {
		return nil, fserrors.FatalError(fs.ErrorOverlapping)
	}
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	s := &syncCopyMove{
		ci:                     ci,
		fi:                     fi,
		fdst:                   fdst,
		fsrc:                   fsrc,
		deleteMode:             deleteMode,
		DoMove:                 DoMove,
		copyEmptySrcDirs:       copyEmptySrcDirs,
		deleteEmptySrcDirs:     deleteEmptySrcDirs,
		dir:                    "",
		srcFilesChan:           make(chan fs.Object, ci.Checkers+ci.Transfers),
		srcFilesResult:         make(chan error, 1),
		dstFilesResult:         make(chan error, 1),
		dstEmptyDirs:           make(map[string]fs.DirEntry),
		srcEmptyDirs:           make(map[string]fs.DirEntry),
		srcMoveEmptyDirs:       make(map[string]fs.DirEntry),
		noTraverse:             ci.NoTraverse,
		noCheckDest:            ci.NoCheckDest,
		noUnicodeNormalization: ci.NoUnicodeNormalization,
		deleteFilesCh:          make(chan fs.Object, ci.Checkers),
		trackRenames:           ci.TrackRenames,
		commonHash:             fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
		modifyWindow:           fs.GetModifyWindow(ctx, fsrc, fdst),
		trackRenamesCh:         make(chan fs.Object, ci.Checkers),
		checkFirst:             ci.CheckFirst,
		setDirMetadata:         ci.Metadata && fsrc.Features().ReadDirMetadata && fdst.Features().WriteDirMetadata,
		setDirModTime:          (!ci.NoUpdateDirModTime && fsrc.Features().CanHaveEmptyDirectories) && (fdst.Features().WriteDirSetModTime || fdst.Features().MkdirMetadata != nil || fdst.Features().DirSetModTime != nil),
		setDirModTimeAfter: !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
		modifiedDirs:       make(map[string]struct{}),
	}

	s.logger, s.usingLogger = operations.GetLogger(ctx)

	if deleteMode == fs.DeleteModeOff {
		loggerOpt := operations.GetLoggerOpt(ctx)
		loggerOpt.DeleteModeOff = true
		loggerOpt.LoggerFn = s.logger
		ctx = operations.WithLoggerOpt(ctx, loggerOpt)
	}

	backlog := ci.MaxBacklog
	if s.checkFirst {
		fs.Infof(s.fdst, "Running all checks before starting transfers")
		backlog = -1
	}
	var err error
	s.toBeChecked, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
	if err != nil {
		return nil, err
	}
	s.toBeUploaded, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
	if err != nil {
		return nil, err
	}
	s.toBeRenamed, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
	if err != nil {
		return nil, err
	}
	if ci.MaxDuration > 0 {
		s.maxDurationEndTime = time.Now().Add(ci.MaxDuration)
		fs.Infof(s.fdst, "Transfer session %v deadline: %s", ci.CutoffMode, s.maxDurationEndTime.Format("2006/01/02 15:04:05"))
	}
	// If a max session duration has been defined add a deadline
	// to the main context if cutoff mode is hard. This will cut
	// the transfers off.
	if !s.maxDurationEndTime.IsZero() && ci.CutoffMode == fs.CutoffModeHard {
		s.ctx, s.cancel = context.WithDeadline(ctx, s.maxDurationEndTime)
	} else {
		s.ctx, s.cancel = context.WithCancel(ctx)
	}
	// Input context - cancel this for graceful stop.
	//
	// If a max session duration has been defined add a deadline
	// to the input context if cutoff mode is graceful or soft.
	// This won't stop the transfers but will cut the
	// list/check/transfer pipelines.
	if !s.maxDurationEndTime.IsZero() && ci.CutoffMode != fs.CutoffModeHard {
		s.inCtx, s.inCancel = context.WithDeadline(s.ctx, s.maxDurationEndTime)
	} else {
		s.inCtx, s.inCancel = context.WithCancel(s.ctx)
	}
	if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
		if !fi.HaveFilesFrom() {
			fs.Errorf(nil, "Ignoring --no-traverse with sync")
		}
		s.noTraverse = false
	}
	s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy)
	if err != nil {
		return nil, err
	}
	if s.noCheckDest {
		if s.deleteMode != fs.DeleteModeOff {
			return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
		}
		if ci.Immutable {
			return nil, errors.New("can't use --no-check-dest with --immutable")
		}
		if s.backupDir != nil {
			return nil, errors.New("can't use --no-check-dest with --backup-dir")
		}
	}
	if s.trackRenames {
		// Don't track renames for remotes without server-side move support.
		if !operations.CanServerSideMove(fdst) {
			fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
			s.trackRenames = false
		}
		if s.trackRenamesStrategy.hash() && s.commonHash == hash.None {
			fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
			s.trackRenames = false
		}

		if s.trackRenamesStrategy.modTime() && s.modifyWindow == fs.ModTimeNotSupported {
			fs.Errorf(fdst, "Ignoring --track-renames as either the source or destination do not support modtime")
			s.trackRenames = false
		}

		if s.deleteMode == fs.DeleteModeOff {
			fs.Errorf(fdst, "Ignoring --track-renames as it doesn't work with copy or move, only sync")
			s.trackRenames = false
		}
	}
	if s.trackRenames {
		// track renames needs delete after
		if s.deleteMode != fs.DeleteModeOff {
			s.deleteMode = fs.DeleteModeAfter
		}
		if s.noTraverse {
			fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
			s.noTraverse = false
		}
	}
	// Make Fs for --backup-dir if required
	if ci.BackupDir != "" || ci.Suffix != "" {
		var err error
		s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "")
		if err != nil {
			return nil, err
		}
	}
	if len(ci.CompareDest) > 0 {
		var err error
		s.compareCopyDest, err = operations.GetCompareDest(ctx)
		if err != nil {
			return nil, err
		}
	} else if len(ci.CopyDest) > 0 {
		var err error
		s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst)
		if err != nil {
			return nil, err
		}
	}
	return s, nil
}

// Check to see if the context has been cancelled
func (s *syncCopyMove) aborting() bool {
	return s.ctx.Err() != nil
}

// This reads the map and pumps it into the channel passed in, closing
// the channel at the end
func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
outer:
	for _, o := range files {
		if s.aborting() {
			break outer
		}
		select {
		case out <- o:
		case <-s.ctx.Done():
			break outer
		}
	}
	close(out)
	s.srcFilesResult <- nil
}

// This checks the types of errors returned while copying files
func (s *syncCopyMove) processError(err error) {
	if err == nil {
		return
	}
	if err == context.DeadlineExceeded {
		err = fserrors.NoRetryError(err)
	} else if err == accounting.ErrorMaxTransferLimitReachedGraceful {
		if s.inCtx.Err() == nil {
			fs.Logf(nil, "%v - stopping transfers", err)
			// Cancel the march and stop the pipes
			s.inCancel()
		}
	} else if err == context.Canceled && s.inCtx.Err() != nil {
		// Ignore context Canceled if we have called s.inCancel()
		return
	}
	s.errorMu.Lock()
	defer s.errorMu.Unlock()
	switch {
	case fserrors.IsFatalError(err):
		if !s.aborting() {
			fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
			s.cancel()
		}
		s.fatalErr = err
	case fserrors.IsNoRetryError(err):
		s.noRetryErr = err
	default:
		s.err = err
	}
}

// Returns the current error (if any) in the order of precedence
//
//	fatalErr
//	normal error
//	noRetryErr
func (s *syncCopyMove) currentError() error {
	s.errorMu.Lock()
	defer s.errorMu.Unlock()
	if s.fatalErr != nil {
		return s.fatalErr
	}
	if s.err != nil {
		return s.err
	}
	return s.noRetryErr
}

// pairChecker reads Objects on in and sends them to out if they need transferring.
//
// FIXME potentially doing lots of hashes at once
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
		src := pair.Src
		var err error
		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src, "checking")
		// Check to see if can store this
		if src.Storable() {
			needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
			if needTransfer {
				NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
				if err != nil {
					s.processError(err)
					s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err)
				}
				if NoNeedTransfer {
					needTransfer = false
				}
			}
			// Fix case for case insensitive filesystems
			if s.ci.FixCase && !s.ci.Immutable && src.Remote() != pair.Dst.Remote() {
				if newDst, err := operations.Move(s.ctx, s.fdst, nil, src.Remote(), pair.Dst); err != nil {
					fs.Errorf(pair.Dst, "Error while attempting to rename to %s: %v", src.Remote(), err)
					s.processError(err)
				} else {
					fs.Infof(pair.Dst, "Fixed case by renaming to: %s", src.Remote())
					pair.Dst = newDst
				}
			}
			if needTransfer {
				// If files are treated as immutable, fail if destination exists and does not match
				if s.ci.Immutable && pair.Dst != nil {
					err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified))
					fs.Errorf(pair.Dst, "Source and destination exist but do not match: %v", err)
					s.processError(err)
				} else {
					if pair.Dst != nil {
						s.markDirModifiedObject(pair.Dst)
					} else {
						s.markDirModifiedObject(src)
					}
					// If destination already exists, then we must move it into --backup-dir if required
					if pair.Dst != nil && s.backupDir != nil {
						err := operations.MoveBackupDir(s.ctx, s.backupDir, pair.Dst)
						if err != nil {
							s.processError(err)
							s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err)
						} else {
							// If successful zero out the dst as it is no longer there and copy the file
							pair.Dst = nil
							ok = out.Put(s.inCtx, pair)
							if !ok {
								return
							}
						}
					} else {
						ok = out.Put(s.inCtx, pair)
						if !ok {
							return
						}
					}
				}
			} else {
				// If moving need to delete the files we don't need to copy
				if s.DoMove {
					// Delete src if no error on copy
					if operations.SameObject(src, pair.Dst) {
						fs.Logf(src, "Not removing source file as it is the same file as the destination")
					} else if s.ci.IgnoreExisting {
						fs.Debugf(src, "Not removing source file as destination file exists and --ignore-existing is set")
					} else if s.checkFirst && s.ci.OrderBy != "" {
						// If we want perfect ordering then use the transfers to delete the file
						//
						// We send src == dst, to say we want the src deleted
						ok = out.Put(s.inCtx, fs.ObjectPair{Src: src, Dst: src})
						if !ok {
							return
						}
					} else {
						deleteFileErr := operations.DeleteFile(s.ctx, src)
						s.processError(deleteFileErr)
						s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, deleteFileErr)
					}
				}
			}
		}
		tr.Done(s.ctx, err)
	}
}

// pairRenamer reads Objects on in and attempts to rename them,
// otherwise it sends them out if they need transferring.
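//
// This only runs when --track-renames is in effect: candidates are queued on
// the toBeRenamed pipe after the march has finished, tryRename attempts to
// rename an existing destination object to the source's name, and anything
// that cannot be renamed is passed on to the toBeUploaded pipe to be
// transferred as normal.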
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
		src := pair.Src
		if !s.tryRename(src) {
			// pass on if not renamed
			fs.Debugf(src, "Need to transfer - No matching file found at Destination")
			ok = out.Put(s.inCtx, pair)
			if !ok {
				return
			}
		}
	}
}

// pairCopyOrMove reads Objects on in and moves or copies them.
func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, fraction int, wg *sync.WaitGroup) {
	defer wg.Done()
	var err error
	for {
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
		src := pair.Src
		dst := pair.Dst
		if s.DoMove {
			if src != dst {
				_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
			} else {
				// src == dst signals delete the src
				err = operations.DeleteFile(ctx, src)
			}
		} else {
			_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
		}
		s.processError(err)
		if err != nil {
			s.logger(ctx, operations.TransferError, src, dst, err)
		}
	}
}

// This starts the background checkers.
func (s *syncCopyMove) startCheckers() {
	s.checkerWg.Add(s.ci.Checkers)
	for i := 0; i < s.ci.Checkers; i++ {
		fraction := (100 * i) / s.ci.Checkers
		go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
	}
}

// This stops the background checkers
func (s *syncCopyMove) stopCheckers() {
	s.toBeChecked.Close()
	fs.Debugf(s.fdst, "Waiting for checks to finish")
	s.checkerWg.Wait()
}

// This starts the background transfers
func (s *syncCopyMove) startTransfers() {
	s.transfersWg.Add(s.ci.Transfers)
	for i := 0; i < s.ci.Transfers; i++ {
		fraction := (100 * i) / s.ci.Transfers
		go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
	}
}

// This stops the background transfers
func (s *syncCopyMove) stopTransfers() {
	s.toBeUploaded.Close()
	fs.Debugf(s.fdst, "Waiting for transfers to finish")
	s.transfersWg.Wait()
}

// This starts the background renamers.
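// Renamers are only started when --track-renames is set and they share the
// --checkers concurrency setting with the checkers.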
func (s *syncCopyMove) startRenamers() {
	if !s.trackRenames {
		return
	}
	s.renamerWg.Add(s.ci.Checkers)
	for i := 0; i < s.ci.Checkers; i++ {
		fraction := (100 * i) / s.ci.Checkers
		go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
	}
}

// This stops the background renamers
func (s *syncCopyMove) stopRenamers() {
	if !s.trackRenames {
		return
	}
	s.toBeRenamed.Close()
	fs.Debugf(s.fdst, "Waiting for renames to finish")
	s.renamerWg.Wait()
}

// This starts the collection of possible renames
func (s *syncCopyMove) startTrackRenames() {
	if !s.trackRenames {
		return
	}
	s.trackRenamesWg.Add(1)
	go func() {
		defer s.trackRenamesWg.Done()
		for o := range s.trackRenamesCh {
			s.renameCheck = append(s.renameCheck, o)
		}
	}()
}

// This stops the background rename collection
func (s *syncCopyMove) stopTrackRenames() {
	if !s.trackRenames {
		return
	}
	close(s.trackRenamesCh)
	s.trackRenamesWg.Wait()
}

// This starts the background deletion of files for --delete-during
func (s *syncCopyMove) startDeleters() {
	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
		return
	}
	s.deletersWg.Add(1)
	go func() {
		defer s.deletersWg.Done()
		err := operations.DeleteFilesWithBackupDir(s.ctx, s.deleteFilesCh, s.backupDir)
		s.processError(err)
	}()
}

// This stops the background deleters
func (s *syncCopyMove) stopDeleters() {
	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
		return
	}
	close(s.deleteFilesCh)
	s.deletersWg.Wait()
}

// This deletes the files in the dstFiles map. If checkSrcMap is set
// then it checks to see if they exist first in srcFiles, the source
// file map, otherwise it unconditionally deletes them. If
// checkSrcMap is clear then it assumes that any source files that
// have been found have been removed from dstFiles already.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
	if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors {
		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		// log all deletes as errors
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					continue
				}
			}
			s.logger(s.ctx, operations.TransferError, nil, o, fs.ErrorNotDeleting)
		}
		return fs.ErrorNotDeleting
	}

	// Delete the spare files
	toDelete := make(fs.ObjectsChan, s.ci.Checkers)
	go func() {
	outer:
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					continue
				}
			}
			if s.aborting() {
				break
			}
			select {
			case <-s.ctx.Done():
				break outer
			case toDelete <- o:
			}
		}
		close(toDelete)
	}()
	return operations.DeleteFilesWithBackupDir(s.ctx, toDelete, s.backupDir)
}

// This deletes the empty directories in the map passed in. It
// ignores any errors deleting directories
It 656 // ignores any errors deleting directories 657 func (s *syncCopyMove) deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error { 658 if len(entriesMap) == 0 { 659 return nil 660 } 661 if accounting.Stats(ctx).Errored() && !s.ci.IgnoreErrors { 662 fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs) 663 return fs.ErrorNotDeletingDirs 664 } 665 666 var entries fs.DirEntries 667 for _, entry := range entriesMap { 668 entries = append(entries, entry) 669 } 670 // Now delete the empty directories starting from the longest path 671 sort.Sort(entries) 672 var errorCount int 673 var okCount int 674 for i := len(entries) - 1; i >= 0; i-- { 675 entry := entries[i] 676 dir, ok := entry.(fs.Directory) 677 if ok { 678 // TryRmdir only deletes empty directories 679 err := operations.TryRmdir(ctx, f, dir.Remote()) 680 if err != nil { 681 fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err) 682 errorCount++ 683 } else { 684 okCount++ 685 } 686 } else { 687 fs.Errorf(f, "Not a directory: %v", entry) 688 } 689 } 690 if errorCount > 0 { 691 fs.Debugf(f, "failed to delete %d directories", errorCount) 692 } 693 if okCount > 0 { 694 fs.Debugf(f, "deleted %d directories", okCount) 695 } 696 return nil 697 } 698 699 // mark the parent of entry as not empty and if entry is a directory mark it as potentially empty. 700 func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) { 701 s.srcEmptyDirsMu.Lock() 702 defer s.srcEmptyDirsMu.Unlock() 703 // Mark entry as potentially empty if it is a directory 704 _, isDir := entry.(fs.Directory) 705 if isDir { 706 s.srcEmptyDirs[entry.Remote()] = entry 707 // if DoMove and --delete-empty-src-dirs flag is set then record the parent but 708 // don't remove any as we are about to move files out of them them making the 709 // directory empty. 710 if s.DoMove && s.deleteEmptySrcDirs { 711 s.srcMoveEmptyDirs[entry.Remote()] = entry 712 } 713 } 714 parentDir := path.Dir(entry.Remote()) 715 if isDir && s.copyEmptySrcDirs { 716 // Mark its parent as not empty 717 if parentDir == "." { 718 parentDir = "" 719 } 720 delete(s.srcEmptyDirs, parentDir) 721 } 722 if !isDir { 723 // Mark ALL its parents as not empty 724 for { 725 if parentDir == "." 
				parentDir = ""
			}
			delete(s.srcEmptyDirs, parentDir)
			if parentDir == "" {
				break
			}
			parentDir = path.Dir(parentDir)
		}
	}
}

// parseTrackRenamesStrategy turns a config string into a trackRenamesStrategy
func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy, err error) {
	if len(strategies) == 0 {
		return strategy, nil
	}
	for _, s := range strings.Split(strategies, ",") {
		switch s {
		case "hash":
			strategy |= trackRenamesStrategyHash
		case "modtime":
			strategy |= trackRenamesStrategyModtime
		case "leaf":
			strategy |= trackRenamesStrategyLeaf
		case "size":
			// ignore
		default:
			return strategy, fmt.Errorf("unknown track renames strategy %q", s)
		}
	}
	return strategy, nil
}

// renameID makes a string with the size and the other identifiers of the requested rename strategies
//
// it may return an empty string in which case no hash could be made
func (s *syncCopyMove) renameID(obj fs.Object, renamesStrategy trackRenamesStrategy, precision time.Duration) string {
	var builder strings.Builder

	fmt.Fprintf(&builder, "%d", obj.Size())

	if renamesStrategy.hash() {
		var err error
		hash, err := obj.Hash(s.ctx, s.commonHash)
		if err != nil {
			fs.Debugf(obj, "Hash failed: %v", err)
			return ""
		}
		if hash == "" {
			return ""
		}

		builder.WriteRune(',')
		builder.WriteString(hash)
	}

	// for renamesStrategy.modTime() we don't add to the hash but we check the times in
	// popRenameMap

	if renamesStrategy.leaf() {
		builder.WriteRune(',')
		builder.WriteString(path.Base(obj.Remote()))
	}

	return builder.String()
}

// pushRenameMap adds the object with hash to the rename map
func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
	s.renameMapMu.Lock()
	s.renameMap[hash] = append(s.renameMap[hash], obj)
	s.renameMapMu.Unlock()
}

// popRenameMap finds the object with hash and pops the first match from
// renameMap or returns nil if not found.
func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) {
	s.renameMapMu.Lock()
	defer s.renameMapMu.Unlock()
	dsts, ok := s.renameMap[hash]
	if ok && len(dsts) > 0 {
		// Element to remove
		i := 0

		// If using track renames strategy modtime then we need to check the modtimes here
		if s.trackRenamesStrategy.modTime() {
			i = -1
			srcModTime := src.ModTime(s.ctx)
			for j, dst := range dsts {
				dstModTime := dst.ModTime(s.ctx)
				dt := dstModTime.Sub(srcModTime)
				if dt < s.modifyWindow && dt > -s.modifyWindow {
					i = j
					break
				}
			}
			// If nothing matched then return nil
			if i < 0 {
				return nil
			}
		}

		// Remove the entry and return it
		dst = dsts[i]
		dsts = append(dsts[:i], dsts[i+1:]...)
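		// Keep any remaining candidates under this ID, or drop the key
		// entirely once the last one has been used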
		if len(dsts) > 0 {
			s.renameMap[hash] = dsts
		} else {
			delete(s.renameMap, hash)
		}
	}
	return dst
}

// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck
func (s *syncCopyMove) makeRenameMap() {
	fs.Infof(s.fdst, "Making map for --track-renames")

	// first make a map of possible sizes we need to check
	possibleSizes := map[int64]struct{}{}
	for _, obj := range s.renameCheck {
		possibleSizes[obj.Size()] = struct{}{}
	}

	// pump all the dstFiles into in
	in := make(chan fs.Object, s.ci.Checkers)
	go s.pumpMapToChan(s.dstFiles, in)

	// now make a map of size,hash for all dstFiles
	s.renameMap = make(map[string][]fs.Object)
	var wg sync.WaitGroup
	wg.Add(s.ci.Checkers)
	for i := 0; i < s.ci.Checkers; i++ {
		go func() {
			defer wg.Done()
			for obj := range in {
				// only create hash for dst fs.Object if its size could match
				if _, found := possibleSizes[obj.Size()]; found {
					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj, "renaming")
					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)

					if hash != "" {
						s.pushRenameMap(hash, obj)
					}

					tr.Done(s.ctx, nil)
				}
			}
		}()
	}
	wg.Wait()
	fs.Infof(s.fdst, "Finished making map for --track-renames")
}

// tryRename renames a src object when doing track renames if
// possible, it returns true if the object was renamed.
func (s *syncCopyMove) tryRename(src fs.Object) bool {
	// Calculate the hash of the src object
	hash := s.renameID(src, s.trackRenamesStrategy, fs.GetModifyWindow(s.ctx, s.fsrc, s.fdst))

	if hash == "" {
		return false
	}

	// Get a match on fdst
	dst := s.popRenameMap(hash, src)
	if dst == nil {
		return false
	}

	// Find dst object we are about to overwrite if it exists
	dstOverwritten, _ := s.fdst.NewObject(s.ctx, src.Remote())

	// Rename dst to have name src.Remote()
	_, err := operations.Move(s.ctx, s.fdst, dstOverwritten, src.Remote(), dst)
	if err != nil {
		fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
		return false
	}

	// remove file from dstFiles if present
	s.dstFilesMu.Lock()
	delete(s.dstFiles, dst.Remote())
	s.dstFilesMu.Unlock()

	fs.Infof(src, "Renamed from %q", dst.Remote())
	return true
}

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
//
// If DoMove is true then files will be moved instead of copied.
//
// dir is the start directory, "" for root
func (s *syncCopyMove) run() error {
	if operations.Same(s.fdst, s.fsrc) {
		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// Start background checking and transferring pipeline
	s.startCheckers()
	s.startRenamers()
	if !s.checkFirst {
		s.startTransfers()
	}
	s.startDeleters()
	s.dstFiles = make(map[string]fs.Object)

	s.startTrackRenames()

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:                    s.inCtx,
		Fdst:                   s.fdst,
		Fsrc:                   s.fsrc,
		Dir:                    s.dir,
		NoTraverse:             s.noTraverse,
		Callback:               s,
		DstIncludeAll:          s.fi.Opt.DeleteExcluded,
		NoCheckDest:            s.noCheckDest,
		NoUnicodeNormalization: s.noUnicodeNormalization,
	}
	s.processError(m.Run(s.ctx))

	s.stopTrackRenames()
	if s.trackRenames {
		// Build the map of the remaining dstFiles by hash
		s.makeRenameMap()
		// Attempt renames for all the files which don't have a matching dst
		for _, src := range s.renameCheck {
			ok := s.toBeRenamed.Put(s.inCtx, fs.ObjectPair{Src: src, Dst: nil})
			if !ok {
				break
			}
		}
	}

	// Stop background checking and transferring pipeline
	s.stopCheckers()
	if s.checkFirst {
		fs.Infof(s.fdst, "Checks finished, now starting transfers")
		s.startTransfers()
	}
	s.stopRenamers()
	s.stopTransfers()
	s.stopDeleters()

	// Delete files after
	if s.deleteMode == fs.DeleteModeAfter {
		if s.currentError() != nil && !s.ci.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		} else {
			s.processError(s.deleteFiles(false))
		}
	}

	// Update modtimes for directories if necessary
	if s.setDirModTime && s.setDirModTimeAfter {
		s.processError(s.setDelayedDirModTimes(s.ctx))
	}

	// Prune empty directories
	if s.deleteMode != fs.DeleteModeOff {
		if s.currentError() != nil && !s.ci.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
		} else {
			s.processError(s.deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
		}
	}

	// Delete empty fsrc subdirectories
	// if DoMove and --delete-empty-src-dirs flag is set
	if s.DoMove && s.deleteEmptySrcDirs {
		// delete potentially empty subdirectories that were part of the move
		s.processError(s.deleteEmptyDirectories(s.ctx, s.fsrc, s.srcMoveEmptyDirs))
	}

	// Read the error out of the contexts if there is one
	s.processError(s.ctx.Err())
	s.processError(s.inCtx.Err())

	// If the duration was exceeded then add a Fatal Error so we don't retry
	if !s.maxDurationEndTime.IsZero() && time.Since(s.maxDurationEndTime) > 0 {
		fs.Errorf(s.fdst, "%v", ErrorMaxDurationReachedFatal)
		s.processError(ErrorMaxDurationReachedFatal)
	}

	// Print nothing to transfer message if there were no transfers and no errors
	if s.deleteMode != fs.DeleteModeOnly && accounting.Stats(s.ctx).GetTransfers() == 0 && s.currentError() == nil {
		fs.Infof(nil, "There was nothing to transfer")
	}

	// cancel the contexts to free resources
	s.inCancel()
	s.cancel()
	return s.currentError()
}

// DstOnly has an object which is in the destination only
func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOff {
		if s.usingLogger {
			switch x := dst.(type) {
			case fs.Object:
				s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil)
			case fs.Directory:
				// it's a directory that we'd normally skip, because we're not deleting anything on the dest
				// however, to make sure every file is logged, we need to list it, so we need to return true here.
				// we skip this when not using logger.
				s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir)
				return true
			}
		}
		return false
	}
	switch x := dst.(type) {
	case fs.Object:
		s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil)
		switch s.deleteMode {
		case fs.DeleteModeAfter:
			// record object as needs deleting
			s.dstFilesMu.Lock()
			s.dstFiles[x.Remote()] = x
			s.dstFilesMu.Unlock()
		case fs.DeleteModeDuring, fs.DeleteModeOnly:
			select {
			case <-s.ctx.Done():
				return
			case s.deleteFilesCh <- x:
			}
		default:
			panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		// Record directory as it is potentially empty and needs deleting
		if s.fdst.Features().CanHaveEmptyDirectories {
			s.dstEmptyDirsMu.Lock()
			s.dstEmptyDirs[dst.Remote()] = dst
			s.dstEmptyDirsMu.Unlock()
			s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir)
		}
		return true
	default:
		panic("Bad object in DirEntries")

	}
	return false
}

// keeps track of dirs with changed contents, to avoid setting modtimes on dirs that haven't changed
func (s *syncCopyMove) markDirModified(dir string) {
	if !s.setDirModTimeAfter {
		return
	}
	s.setDirModTimeMu.Lock()
	defer s.setDirModTimeMu.Unlock()
	s.modifiedDirs[dir] = struct{}{}
}

// like markDirModified, but accepts an Object instead of a string.
// the marked dir will be this object's parent.
func (s *syncCopyMove) markDirModifiedObject(o fs.Object) {
	dir := path.Dir(o.Remote())
	if dir == "." {
		dir = ""
	}
	s.markDirModified(dir)
}

// copyDirMetadata copies the src directory modTime or Metadata to dst
// or f if nil. If dst is nil then it uses dir as the name of the new
// directory.
//
// It returns the destination directory if possible. Note that this may
// be nil.
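//
// If setDirModTime and setDirModTimeAfter are both set, the directory is also
// recorded in s.setDirModTimes so that setDelayedDirModTimes can apply the
// modtime (and the full metadata when src is recorded) again at the end of
// the sync, after the directory contents have been transferred.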
func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Directory, dir string, src fs.Directory) (newDst fs.Directory) {
	var err error
	equal := operations.DirsEqual(ctx, src, dst, operations.DirsEqualOpt{ModifyWindow: s.modifyWindow, SetDirModtime: s.setDirModTime, SetDirMetadata: s.setDirMetadata})
	if !s.setDirModTimeAfter && equal {
		return nil
	}
	setMeta := true
	if s.setDirModTimeAfter && equal {
		newDst = dst
	} else if s.copyEmptySrcDirs {
		if s.setDirMetadata {
			newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
		} else if s.setDirModTime {
			if dst == nil {
				newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
			} else {
				newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
			}
		} else if dst == nil {
			// Create the directory if it doesn't exist
			err = operations.Mkdir(ctx, f, dir)
		}
	} else {
		setMeta = s.setDirMetadata
		newDst = dst
	}
	// If we need to set modtime after and we created a dir, then save it for later
	if s.setDirModTime && s.setDirModTimeAfter && err == nil {
		if newDst != nil {
			dir = newDst.Remote()
		}
		level := strings.Count(dir, "/") + 1
		// The root directory "" is at the top level
		if dir == "" {
			level = 0
		}
		s.setDirModTimeMu.Lock()
		// Keep track of the maximum level inserted
		if level > s.setDirModTimesMaxLevel {
			s.setDirModTimesMaxLevel = level
		}
		set := setDirModTime{
			dst:     newDst,
			dir:     dir,
			modTime: src.ModTime(ctx),
			level:   level,
		}
		if setMeta {
			set.src = src
		}
		s.setDirModTimes = append(s.setDirModTimes, set)
		s.setDirModTimeMu.Unlock()
		fs.Debugf(nil, "Added delayed dir = %q, newDst=%v", dir, newDst)
	}
	s.processError(err)
	if err != nil {
		return nil
	}
	return newDst
}

// Set the modtimes for directories
func (s *syncCopyMove) setDelayedDirModTimes(ctx context.Context) error {
	s.setDirModTimeMu.Lock()
	defer s.setDirModTimeMu.Unlock()

	// Timestamp all directories at the same level in parallel, deepest first
	// We do this by iterating the slice multiple times to save memory
	// There could be a lot of directories in this slice.
	errCount := errcount.New()
	for level := s.setDirModTimesMaxLevel; level >= 0; level-- {
		g, gCtx := errgroup.WithContext(ctx)
		g.SetLimit(s.ci.Checkers)
		for _, item := range s.setDirModTimes {
			if item.level != level {
				continue
			}
			// End early if error
			if gCtx.Err() != nil {
				break
			}
			if _, ok := s.modifiedDirs[item.dir]; !ok {
				continue
			}
			if !s.copyEmptySrcDirs {
				if _, isEmpty := s.srcEmptyDirs[item.dir]; isEmpty {
					continue
				}
			}
			item := item
			if s.setDirModTimeAfter { // mark dir's parent as modified
				dir := path.Dir(item.dir)
				if dir == "." {
					dir = ""
				}
				s.modifiedDirs[dir] = struct{}{} // lock is already held
			}
			g.Go(func() error {
				var err error
				// if item.src is set must copy full metadata
				if item.src != nil {
					_, err = operations.CopyDirMetadata(gCtx, s.fdst, item.dst, item.dir, item.src)
				} else {
					_, err = operations.SetDirModTime(gCtx, s.fdst, item.dst, item.dir, item.modTime)
				}
				if err != nil {
					err = fs.CountError(err)
					fs.Errorf(item.dir, "Failed to update directory timestamp or metadata: %v", err)
					errCount.Add(err)
				}
				return nil // don't return errors, just count them
			})
		}
		err := g.Wait()
		if err != nil {
			return err
		}
	}
	return errCount.Err("failed to set directory modtime")
}

// SrcOnly has an object which is in the source only
func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOnly {
		return false
	}
	switch x := src.(type) {
	case fs.Object:
		s.logger(s.ctx, operations.MissingOnDst, x, nil, nil)
		s.markParentNotEmpty(src)

		if s.trackRenames {
			// Save object to check for a rename later
			select {
			case <-s.ctx.Done():
				return
			case s.trackRenamesCh <- x:
			}
		} else {
			// Check CompareDest && CopyDest
			NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, nil, x, s.compareCopyDest, s.backupDir)
			if err != nil {
				s.processError(err)
				s.logger(s.ctx, operations.TransferError, x, nil, err)
			}
			if !NoNeedTransfer {
				// No need to check since doesn't exist
				fs.Debugf(src, "Need to transfer - File not found at Destination")
				s.markDirModifiedObject(x)
				ok := s.toBeUploaded.Put(s.inCtx, fs.ObjectPair{Src: x, Dst: nil})
				if !ok {
					return
				}
			}
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		s.markParentNotEmpty(src)
		s.logger(s.ctx, operations.MissingOnDst, src, nil, fs.ErrorIsDir)

		// Create the directory and make sure the Metadata/ModTime is correct
		s.copyDirMetadata(s.ctx, s.fdst, nil, x.Remote(), x)
		s.markDirModified(x.Remote())
		return true
	default:
		panic("Bad object in DirEntries")
	}
	return false
}

// Match is called when src and dst are present, so sync src to dst
func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		s.markParentNotEmpty(src)

		if s.deleteMode == fs.DeleteModeOnly {
			return false
		}
		dstX, ok := dst.(fs.Object)
		if ok {
			// No logger here because we'll handle it in equal()
			ok = s.toBeChecked.Put(s.inCtx, fs.ObjectPair{Src: srcX, Dst: dstX})
			if !ok {
				return false
			}
		} else {
			// FIXME src is file, dst is directory
			err := errors.New("can't overwrite directory with file")
			fs.Errorf(dst, "%v", err)
			s.processError(err)
			s.logger(ctx, operations.TransferError, srcX, dstX, err)
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		s.markParentNotEmpty(src)
		dstX, ok := dst.(fs.Directory)
		if ok {
			s.logger(s.ctx, operations.Match, src, dst, fs.ErrorIsDir)
			// Create the directory and make sure the Metadata/ModTime is correct
			s.copyDirMetadata(s.ctx, s.fdst, dstX, "", srcX)

			if s.ci.FixCase && !s.ci.Immutable && src.Remote() != dst.Remote() {
				// Fix case for case insensitive filesystems
				// Fix each dir before recursing into subdirs and files
				err := operations.DirMoveCaseInsensitive(s.ctx, s.fdst, dst.Remote(), src.Remote())
				if err != nil {
					fs.Errorf(dst, "Error while attempting to rename to %s: %v", src.Remote(), err)
					s.processError(err)
				} else {
					fs.Infof(dst, "Fixed case by renaming to: %s", src.Remote())
				}
			}

			return true
		}
		// FIXME src is dir, dst is file
		err := errors.New("can't overwrite file with directory")
		fs.Errorf(dst, "%v", err)
		s.processError(err)
		s.logger(ctx, operations.TransferError, src.(fs.ObjectInfo), dst.(fs.ObjectInfo), err)
	default:
		panic("Bad object in DirEntries")
	}
	return false
}

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
//
// If DoMove is true then files will be moved instead of copied.
//
// dir is the start directory, "" for root
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	ci := fs.GetConfig(ctx)
	if deleteMode != fs.DeleteModeOff && DoMove {
		return fserrors.FatalError(errors.New("can't delete and move at the same time"))
	}
	// Run an extra pass to delete only
	if deleteMode == fs.DeleteModeBefore {
		if ci.TrackRenames {
			return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
		}
		// only delete stuff in this pass
		do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
		if err != nil {
			return err
		}
		err = do.run()
		if err != nil {
			return err
		}
		// Next pass does a copy only
		deleteMode = fs.DeleteModeOff
	}
	do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
	if err != nil {
		return err
	}
	return do.run()
}

// Sync fsrc into fdst
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
	ci := fs.GetConfig(ctx)
	return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
}

// CopyDir copies fsrc into fdst
func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
	return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
}

// moveDir moves fsrc into fdst
func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
}

// MoveDir moves fsrc into fdst
func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	fi := filter.GetConfig(ctx)
	if operations.Same(fdst, fsrc) {
		fs.Errorf(fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// First attempt to use DirMover if exists, same Fs and no filters are active
	if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && fi.InActive() {
		if operations.SkipDestructive(ctx, fdst, "server-side directory move") {
			return nil
		}
		fs.Debugf(fdst, "Using server-side directory move")
directory move") 1399 err := fdstDirMove(ctx, fsrc, "", "") 1400 switch err { 1401 case fs.ErrorCantDirMove, fs.ErrorDirExists: 1402 fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err) 1403 case nil: 1404 fs.Infof(fdst, "Server side directory move succeeded") 1405 return nil 1406 default: 1407 err = fs.CountError(err) 1408 fs.Errorf(fdst, "Server side directory move failed: %v", err) 1409 return err 1410 } 1411 } 1412 1413 // Otherwise move the files one by one 1414 return moveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs) 1415 }