github.com/minio/mc@v0.0.0-20240503112107-b471de8d1882/cmd/mirror-main.go (about)

     1  // Copyright (c) 2015-2022 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"context"
    22  	"fmt"
    23  	"math/rand"
    24  	"net/http"
    25  	"path"
    26  	"path/filepath"
    27  	"runtime"
    28  	"strings"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/fatih/color"
    33  	"github.com/minio/cli"
    34  	json "github.com/minio/colorjson"
    35  	"github.com/minio/mc/pkg/probe"
    36  	"github.com/minio/minio-go/v7"
    37  	"github.com/minio/minio-go/v7/pkg/encrypt"
    38  	"github.com/minio/minio-go/v7/pkg/notification"
    39  	"github.com/minio/pkg/v2/console"
    40  	"github.com/prometheus/client_golang/prometheus"
    41  	"github.com/prometheus/client_golang/prometheus/promauto"
    42  	"github.com/prometheus/client_golang/prometheus/promhttp"
    43  )
    44  
    45  // mirror specific flags.
var (
	// mirrorFlags are the flags specific to `mc mirror`; encryption and
	// global flags are appended separately when building mirrorCmd.
	mirrorFlags = []cli.Flag{
		// --force is deprecated; --overwrite/--remove cover its use cases.
		cli.BoolFlag{
			Name:   "force",
			Usage:  "force allows forced overwrite or removal of object(s) on target",
			Hidden: true, // Hidden since this option is deprecated.
		},
		cli.BoolFlag{
			Name:  "overwrite",
			Usage: "overwrite object(s) on target if it differs from source",
		},
		// --fake is the deprecated spelling of --dry-run.
		cli.BoolFlag{
			Name:   "fake",
			Usage:  "perform a fake mirror operation",
			Hidden: true, // deprecated 2022
		},
		cli.BoolFlag{
			Name:  "dry-run",
			Usage: "perform a fake mirror operation",
		},
		cli.BoolFlag{
			Name:  "watch, w",
			Usage: "watch and synchronize changes",
		},
		cli.BoolFlag{
			Name:  "remove",
			Usage: "remove extraneous object(s) on target",
		},
		cli.StringFlag{
			Name:  "region",
			Usage: "specify region when creating new bucket(s) on target",
			Value: "us-east-1",
		},
		cli.BoolFlag{
			Name:  "preserve, a",
			Usage: "preserve file(s)/object(s) attributes and bucket(s) policy/locking configuration(s) on target bucket(s)",
		},
		cli.BoolFlag{
			Name:  "md5",
			Usage: "force all upload(s) to calculate md5sum checksum",
		},
		// --multi-master is the hidden, legacy alias of --active-active.
		cli.BoolFlag{
			Name:   "multi-master",
			Usage:  "enable multi-master multi-site setup",
			Hidden: true,
		},
		cli.BoolFlag{
			Name:  "active-active",
			Usage: "enable active-active multi-site setup",
		},
		cli.BoolFlag{
			Name:  "disable-multipart",
			Usage: "disable multipart upload feature",
		},
		// Exclusion filters: object name, bucket name, and storage class.
		cli.StringSliceFlag{
			Name:  "exclude",
			Usage: "exclude object(s) that match specified object name pattern",
		},
		cli.StringSliceFlag{
			Name:  "exclude-bucket",
			Usage: "exclude bucket(s) that match specified bucket name pattern",
		},
		cli.StringSliceFlag{
			Name:  "exclude-storageclass",
			Usage: "exclude object(s) that match the specified storage class",
		},
		// Time-based filters applied to source object modification times.
		cli.StringFlag{
			Name:  "older-than",
			Usage: "filter object(s) older than value in duration string (e.g. 7d10h31s)",
		},
		cli.StringFlag{
			Name:  "newer-than",
			Usage: "filter object(s) newer than value in duration string (e.g. 7d10h31s)",
		},
		cli.StringFlag{
			Name:  "storage-class, sc",
			Usage: "specify storage class for new object(s) on target",
		},
		cli.StringFlag{
			Name:  "attr",
			Usage: "add custom metadata for all objects",
		},
		cli.StringFlag{
			Name:  "monitoring-address",
			Usage: "if specified, a new prometheus endpoint will be created to report mirroring activity. (eg: localhost:8081)",
		},
		cli.BoolFlag{
			Name:  "retry",
			Usage: "if specified, will enable retrying on a per object basis if errors occur",
		},
		cli.BoolFlag{
			Name:  "summary",
			Usage: "print a summary of the mirror session",
		},
		cli.BoolFlag{
			Name:  "skip-errors",
			Usage: "skip any errors when mirroring",
		},
	}
)
   146  
   147  // Mirror folders recursively from a single source to many destinations
// mirrorCmd is the cli.Command definition for `mc mirror`; its flag set is
// mirrorFlags plus the shared encryption and global flags.
var mirrorCmd = cli.Command{
	Name:         "mirror",
	Usage:        "synchronize object(s) to a remote site",
	Action:       mainMirror,
	OnUsageError: onUsageError,
	Before:       setGlobalsFromContext,
	Flags:        append(append(mirrorFlags, encFlags...), globalFlags...),
	CustomHelpTemplate: `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} [FLAGS] SOURCE TARGET

FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}

ENVIRONMENT VARIABLES:
  MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key).
  MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key).

EXAMPLES:
  01. Mirror a bucket recursively from MinIO cloud storage to a bucket on Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} play/photos/2014 s3/backup-photos

  02. Mirror a local folder recursively to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} backup/ s3/archive

  03. Only mirror files that are newer than 7 days, 10 hours and 30 minutes to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --newer-than "7d10h30m" backup/ s3/archive

  04. Mirror a bucket from aliased Amazon S3 cloud storage to a folder on Windows.
      {{.Prompt}} {{.HelpName}} s3\documents\2014\ C:\backup\2014

  05. Mirror a bucket from aliased Amazon S3 cloud storage to a local folder use '--overwrite' to overwrite destination.
      {{.Prompt}} {{.HelpName}} --overwrite s3/miniocloud miniocloud-backup

  06. Mirror a bucket from MinIO cloud storage to a bucket on Amazon S3 cloud storage and remove any extraneous
      files on Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --remove play/photos/2014 s3/backup-photos/2014

  07. Continuously mirror a local folder recursively to MinIO cloud storage. '--watch' continuously watches for
      new objects, uploads and removes extraneous files on Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --remove --watch /var/lib/backups play/backups

  08. Continuously mirror all buckets and objects from site 1 to site 2, removed buckets and objects will be reflected as well.
      {{.Prompt}} {{.HelpName}} --remove --watch site1-alias/ site2-alias/

  09. Mirror a bucket from aliased Amazon S3 cloud storage to a local folder.
      Exclude all .* files and *.temp files when mirroring.
      {{.Prompt}} {{.HelpName}} --exclude ".*" --exclude "*.temp" s3/test ~/test

  10. Mirror all buckets from aliased Amazon S3 cloud storage to a local folder.
      Exclude test* buckets and backup* buckets when mirroring.
      {{.Prompt}} {{.HelpName}} --exclude-bucket 'test*' --exclude 'backup*' s3 ~/test

  11. Mirror objects newer than 10 days from bucket test to a local folder.
      {{.Prompt}} {{.HelpName}} --newer-than 10d s3/test ~/localfolder

  12. Mirror objects older than 30 days from Amazon S3 bucket test to a local folder.
      {{.Prompt}} {{.HelpName}} --older-than 30d s3/test ~/test

  13. Mirror server encrypted objects from Amazon S3 cloud storage to a bucket on Amazon S3 cloud storage
      {{.Prompt}} {{.HelpName}} --enc-c "minio/archive=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" --enc-c "s3/archive=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5BBB" s3/archive/ minio/archive/ 

  14. Update 'Cache-Control' header on all existing objects recursively.
      {{.Prompt}} {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000" myminio/video-files myminio/video-files

  15. Mirror a local folder recursively to Amazon S3 cloud storage and preserve all local file attributes.
      {{.Prompt}} {{.HelpName}} -a backup/ s3/archive

  16. Cross mirror between sites in a active-active deployment.
      Site-A: {{.Prompt}} {{.HelpName}} --active-active siteA siteB
      Site-B: {{.Prompt}} {{.HelpName}} --active-active siteB siteA
`,
}
   224  
// Prometheus metrics describing mirror activity; they are served over HTTP
// when the --monitoring-address flag enables a metrics endpoint.
var (
	// mirrorTotalOps counts every status entry processed by
	// monitorMirrorStatus, successful or failed.
	mirrorTotalOps = promauto.NewCounter(prometheus.CounterOpts{
		Name: "mc_mirror_total_s3ops",
		Help: "The total number of mirror operations",
	})
	// mirrorTotalUploadedBytes accumulates the size of successfully
	// mirrored source objects.
	mirrorTotalUploadedBytes = promauto.NewCounter(prometheus.CounterOpts{
		Name: "mc_mirror_total_s3uploaded_bytes",
		Help: "The total number of bytes uploaded",
	})
	// mirrorFailedOps counts copy/remove failures that were not ignored
	// (see monitorMirrorStatus).
	mirrorFailedOps = promauto.NewCounter(prometheus.CounterOpts{
		Name: "mc_mirror_failed_s3ops",
		Help: "The total number of failed mirror operations",
	})
	// mirrorRestarts counts mirror session restarts; incremented outside
	// this portion of the file.
	mirrorRestarts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "mc_mirror_total_restarts",
		Help: "The number of mirror restarts",
	})
	// mirrorReplicationDurations records per-object replication latency in
	// milliseconds, labeled with the coarse size bucket produced by
	// convertSizeToTag.
	mirrorReplicationDurations = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "mc_mirror_replication_duration",
			Help:    "Histogram of replication time in ms per object sizes",
			Buckets: prometheus.ExponentialBuckets(1, 20, 5),
		},
		[]string{"object_size"},
	)
)
   251  
// uaMirrorAppName is the user-agent token added by doRemove (optionally
// suffixed with the source alias) so that delete events caused by mirror
// itself can be recognized and skipped, preventing cyclic deletes.
const uaMirrorAppName = "mc-mirror"
   253  
// mirrorJob carries the state of one `mc mirror` session: the event
// watcher, progress/status tracking, the worker pool, and the two
// endpoints being synchronized.
type mirrorJob struct {
	// stopCh signals startMirror's processing loop to exit.
	stopCh chan struct{}

	// the global watcher object, which receives notifications of created
	// and deleted files
	watcher *Watcher

	// Hold operation status information
	status Status

	// parallel runs queued copy/remove tasks concurrently; its results
	// are delivered on statusCh.
	parallel *ParallelManager

	// channel for status messages; drained by monitorMirrorStatus and
	// closed once both the watcher and startMirror have finished.
	statusCh chan URLs

	// Session-wide totals. NOTE(review): not referenced in this portion
	// of the file; totals shown to the user come from mj.status.
	TotalObjects int64
	TotalBytes   int64

	// Raw (unexpanded, alias-form) source and target URLs.
	sourceURL string
	targetURL string

	// opts holds the parsed command-line options controlling this job.
	opts mirrorOptions
}
   277  
// mirrorMessage container for file mirror messages
type mirrorMessage struct {
	Status     string `json:"status"` // forced to "success" by JSON()
	Source     string `json:"source"` // alias-qualified source path
	Target     string `json:"target"` // alias-qualified target path
	Size       int64  `json:"size"`   // size of the source object in bytes
	TotalCount int64  `json:"totalCount"` // running count of queued items
	TotalSize  int64  `json:"totalSize"`  // running byte total of queued items
}
   287  
   288  // String colorized mirror message
   289  func (m mirrorMessage) String() string {
   290  	return console.Colorize("Mirror", fmt.Sprintf("`%s` -> `%s`", m.Source, m.Target))
   291  }
   292  
   293  // JSON jsonified mirror message
   294  func (m mirrorMessage) JSON() string {
   295  	m.Status = "success"
   296  	mirrorMessageBytes, e := json.MarshalIndent(m, "", " ")
   297  	fatalIf(probe.NewError(e), "Unable to marshal into JSON.")
   298  
   299  	return string(mirrorMessageBytes)
   300  }
   301  
   302  func (mj *mirrorJob) doCreateBucket(ctx context.Context, sURLs URLs) URLs {
   303  	if mj.opts.isFake {
   304  		return sURLs.WithError(nil)
   305  	}
   306  
   307  	// Construct proper path with alias.
   308  	aliasedURL := filepath.Join(sURLs.TargetAlias, sURLs.TargetContent.URL.Path)
   309  	clnt, pErr := newClient(aliasedURL)
   310  	if pErr != nil {
   311  		return sURLs.WithError(pErr)
   312  	}
   313  
   314  	err := clnt.MakeBucket(ctx, "", mj.opts.isOverwrite, false)
   315  	if err != nil {
   316  		return sURLs.WithError(err)
   317  	}
   318  
   319  	return sURLs.WithError(nil)
   320  }
   321  
   322  func (mj *mirrorJob) doDeleteBucket(ctx context.Context, sURLs URLs) URLs {
   323  	if mj.opts.isFake {
   324  		return sURLs.WithError(nil)
   325  	}
   326  
   327  	// Construct proper path with alias.
   328  	aliasedURL := filepath.Join(sURLs.TargetAlias, sURLs.TargetContent.URL.Path)
   329  	clnt, pErr := newClient(aliasedURL)
   330  	if pErr != nil {
   331  		return sURLs.WithError(pErr)
   332  	}
   333  
   334  	contentCh := make(chan *ClientContent, 1)
   335  	contentCh <- &ClientContent{URL: clnt.GetURL()}
   336  	close(contentCh)
   337  
   338  	for result := range clnt.Remove(ctx, false, true, false, false, contentCh) {
   339  		if result.Err != nil {
   340  			return sURLs.WithError(result.Err)
   341  		}
   342  	}
   343  
   344  	return sURLs.WithError(nil)
   345  }
   346  
   347  // doRemove - removes files on target.
   348  func (mj *mirrorJob) doRemove(ctx context.Context, sURLs URLs) URLs {
   349  	if mj.opts.isFake {
   350  		return sURLs.WithError(nil)
   351  	}
   352  
   353  	// Construct proper path with alias.
   354  	targetWithAlias := filepath.Join(sURLs.TargetAlias, sURLs.TargetContent.URL.Path)
   355  	clnt, pErr := newClient(targetWithAlias)
   356  	if pErr != nil {
   357  		return sURLs.WithError(pErr)
   358  	}
   359  	if sURLs.SourceAlias != "" {
   360  		clnt.AddUserAgent(uaMirrorAppName+":"+sURLs.SourceAlias, ReleaseTag)
   361  	} else {
   362  		clnt.AddUserAgent(uaMirrorAppName, ReleaseTag)
   363  	}
   364  	contentCh := make(chan *ClientContent, 1)
   365  	contentCh <- &ClientContent{URL: *newClientURL(sURLs.TargetContent.URL.Path)}
   366  	close(contentCh)
   367  	isRemoveBucket := false
   368  	resultCh := clnt.Remove(ctx, false, isRemoveBucket, false, false, contentCh)
   369  	for result := range resultCh {
   370  		if result.Err != nil {
   371  			switch result.Err.ToGoError().(type) {
   372  			case PathInsufficientPermission:
   373  				// Ignore Permission error.
   374  				continue
   375  			}
   376  			return sURLs.WithError(result.Err)
   377  		}
   378  	}
   379  
   380  	return sURLs.WithError(nil)
   381  }
   382  
// doMirrorWatch decides whether a watch event should be copied and, if so,
// updates the progress accounting before delegating to doMirror. Unless
// --overwrite or --active-active is set, the target is Stat'ed first and an
// already-existing object (with no retention/legal-hold on the source) is
// rejected with ObjectAlreadyExists.
func (mj *mirrorJob) doMirrorWatch(ctx context.Context, targetPath string, tgtSSE encrypt.ServerSide, sURLs URLs) URLs {
	shouldQueue := false
	if !mj.opts.isOverwrite && !mj.opts.activeActive {
		targetClient, err := newClient(targetPath)
		if err != nil {
			// cannot create targetclient
			return sURLs.WithError(err)
		}
		_, err = targetClient.Stat(ctx, StatOptions{sse: tgtSSE})
		if err == nil {
			// Target exists: only proceed when the source carries
			// retention/legal-hold attributes that must still be mirrored.
			if !sURLs.SourceContent.RetentionEnabled && !sURLs.SourceContent.LegalHoldEnabled {
				return sURLs.WithError(probe.NewError(ObjectAlreadyExists{}))
			}
		} // doesn't exist
		// Reached when the target is missing, or exists but needs its
		// retention/legal-hold state refreshed.
		shouldQueue = true
	}
	if shouldQueue || mj.opts.isOverwrite || mj.opts.activeActive {
		// adjust total, because we want to show progress of
		// the item still queued to be copied.
		mj.status.Add(sURLs.SourceContent.Size)
		mj.status.SetTotal(mj.status.Get()).Update()
		mj.status.AddCounts(1)
		sURLs.TotalSize = mj.status.Get()
		sURLs.TotalCount = mj.status.GetCounts()
		return mj.doMirror(ctx, sURLs)
	}
	// NOTE(review): this return appears unreachable — whenever the guarded
	// block above runs without returning, shouldQueue is set to true.
	return sURLs.WithError(probe.NewError(ObjectAlreadyExists{}))
}
   412  
   413  func convertSizeToTag(size int64) string {
   414  	switch {
   415  	case size < 1024:
   416  		return "LESS_THAN_1_KiB"
   417  	case size < 1024*1024:
   418  		return "LESS_THAN_1_MiB"
   419  	case size < 10*1024*1024:
   420  		return "LESS_THAN_10_MiB"
   421  	case size < 100*1024*1024:
   422  		return "LESS_THAN_100_MiB"
   423  	case size < 1024*1024*1024:
   424  		return "LESS_THAN_1_GiB"
   425  	default:
   426  		return "GREATER_THAN_1_GiB"
   427  	}
   428  }
   429  
// doMirror - Mirror an object to multiple destination. URLs status contains a copy of sURLs and error if any.
// It fills in target metadata (storage class, active-active modtime marker,
// user metadata), prints the per-object progress message unless --summary
// is set, then uploads once — or under a retry manager when --retry is set —
// recording replication latency on success.
func (mj *mirrorJob) doMirror(ctx context.Context, sURLs URLs) URLs {
	if sURLs.Error != nil { // Erroneous sURLs passed.
		return sURLs.WithError(sURLs.Error.Trace())
	}

	// For a fake mirror make sure we update respective progress bars
	// and accounting readers under relevant conditions.
	if mj.opts.isFake {
		if sURLs.SourceContent != nil {
			mj.status.Add(sURLs.SourceContent.Size)
		}
		mj.status.Update()
		return sURLs.WithError(nil)
	}

	sourceAlias := sURLs.SourceAlias
	sourceURL := sURLs.SourceContent.URL
	targetAlias := sURLs.TargetAlias
	targetURL := sURLs.TargetContent.URL
	length := sURLs.SourceContent.Size

	mj.status.SetCaption(sourceURL.String() + ":")

	// Initialize target metadata.
	sURLs.TargetContent.Metadata = make(map[string]string)

	if mj.opts.storageClass != "" {
		sURLs.TargetContent.StorageClass = mj.opts.storageClass
	}

	if mj.opts.activeActive {
		srcModTime := getSourceModTimeKey(sURLs.SourceContent.Metadata)
		// If the source object already has source modtime attribute set, then
		// use it in target. Otherwise use the S3 modtime instead.
		if srcModTime != "" {
			sURLs.TargetContent.Metadata[activeActiveSourceModTimeKey] = srcModTime
		} else {
			sURLs.TargetContent.Metadata[activeActiveSourceModTimeKey] = sURLs.SourceContent.Time.Format(time.RFC3339Nano)
		}
	}

	// Initialize additional target user metadata.
	sURLs.TargetContent.UserMetadata = mj.opts.userMetadata

	sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceURL.Path))
	targetPath := filepath.ToSlash(filepath.Join(targetAlias, targetURL.Path))
	// Per-object progress messages are suppressed in --summary mode.
	if !mj.opts.isSummary {
		mj.status.PrintMsg(mirrorMessage{
			Source:     sourcePath,
			Target:     targetPath,
			Size:       length,
			TotalCount: sURLs.TotalCount,
			TotalSize:  sURLs.TotalSize,
		})
	}
	sURLs.MD5 = mj.opts.md5
	sURLs.DisableMultipart = mj.opts.disableMultipart

	var ret URLs

	// Without --retry: a single upload attempt, timed for the replication
	// duration histogram (bucketed by object size) on success.
	if !mj.opts.isRetriable {
		now := time.Now()
		ret = uploadSourceToTargetURL(ctx, uploadSourceToTargetURLOpts{urls: sURLs, progress: mj.status, encKeyDB: mj.opts.encKeyDB, preserve: mj.opts.isMetadata, isZip: false})
		if ret.Error == nil {
			durationMs := time.Since(now).Milliseconds()
			mirrorReplicationDurations.With(prometheus.Labels{"object_size": convertSizeToTag(sURLs.SourceContent.Size)}).Observe(float64(durationMs))
		}

		return ret
	}

	// With --retry: run under a retry manager (base delay 1s, limit 3 —
	// presumably; confirm in retryManager), printing a retry message on
	// every attempt after the first.
	newRetryManager(ctx, time.Second, 3).retry(func(rm *retryManager) *probe.Error {
		if rm.retries > 0 {
			printMsg(retryMessage{
				SourceURL: sURLs.SourceContent.URL.String(),
				TargetURL: sURLs.TargetContent.URL.String(),
				Retries:   rm.retries,
			})
		}

		now := time.Now()
		ret = uploadSourceToTargetURL(ctx, uploadSourceToTargetURLOpts{urls: sURLs, progress: mj.status, encKeyDB: mj.opts.encKeyDB, preserve: mj.opts.isMetadata, isZip: false})
		if ret.Error == nil {
			durationMs := time.Since(now).Milliseconds()
			mirrorReplicationDurations.With(prometheus.Labels{"object_size": convertSizeToTag(sURLs.SourceContent.Size)}).Observe(float64(durationMs))
		}

		return ret.Error
	})

	return ret
}
   523  
// Update progress status.
// monitorMirrorStatus drains statusCh, updating Prometheus counters and
// printing errors. It reports whether any non-ignored error occurred, and
// cancels the session on the first fatal error unless --skip-errors,
// --active-active or --watch is in effect.
func (mj *mirrorJob) monitorMirrorStatus(cancel context.CancelFunc) (errDuringMirror bool) {
	// now we want to start the progress bar
	mj.status.Start()
	defer mj.status.Finish()

	// Set once cancel() has been called; afterwards the channel is only
	// drained, with no further printing.
	var cancelInProgress bool

	for sURLs := range mj.statusCh {
		if cancelInProgress {
			// Do not need to print any error after
			// canceling the context, just draining
			// the status channel here.
			continue
		}

		// Update prometheus fields
		mirrorTotalOps.Inc()

		if sURLs.Error != nil {
			var ignoreErr bool

			switch {
			case sURLs.SourceContent != nil:
				// Copy failure.
				if isErrIgnored(sURLs.Error) {
					ignoreErr = true
				} else {
					switch sURLs.Error.ToGoError().(type) {
					case PathInsufficientPermission:
						// Ignore Permission error.
						ignoreErr = true
					}
					if !ignoreErr {
						if !mj.opts.skipErrors {
							errorIf(sURLs.Error.Trace(sURLs.SourceContent.URL.String()),
								fmt.Sprintf("Failed to copy `%s`.", sURLs.SourceContent.URL.String()))
						} else {
							console.Infof("[Warn] Failed to copy `%s`. %s", sURLs.SourceContent.URL.String(), sURLs.Error.Trace(sURLs.SourceContent.URL.String()))
						}
					}
				}
			case sURLs.TargetContent != nil:
				// When sURLs.SourceContent is nil, we know that we have an error related to removing
				errorIf(sURLs.Error.Trace(sURLs.TargetContent.URL.String()),
					fmt.Sprintf("Failed to remove `%s`.", sURLs.TargetContent.URL.String()))
			default:
				// Neither side is set: a general mirroring error.
				if strings.Contains(sURLs.Error.ToGoError().Error(), "Overwrite not allowed") {
					ignoreErr = true
				}
				if sURLs.ErrorCond == differInUnknown {
					errorIf(sURLs.Error.Trace(), "Failed to perform mirroring")
				} else {
					errorIf(sURLs.Error.Trace(),
						"Failed to perform mirroring, with error condition (%s)", sURLs.ErrorCond)
				}
			}

			if !ignoreErr {
				mirrorFailedOps.Inc()
				errDuringMirror = true
				// Quit mirroring if --watch and --active-active are not passed
				if !mj.opts.skipErrors && !mj.opts.activeActive && !mj.opts.isWatch {
					cancel()
					cancelInProgress = true
				}
			}

			continue
		}

		// Success path: account uploaded bytes, or report a removal.
		if sURLs.SourceContent != nil {
			mirrorTotalUploadedBytes.Add(float64(sURLs.SourceContent.Size))
		} else if sURLs.TargetContent != nil {
			// Construct user facing message and path.
			targetPath := filepath.ToSlash(filepath.Join(sURLs.TargetAlias, sURLs.TargetContent.URL.Path))
			mj.status.PrintMsg(rmMessage{Key: targetPath})
		}
	}

	return
}
   605  
// watchMirrorEvents translates bucket notification events into mirror
// tasks: object-created events become copies, object-removed events become
// removals (when --remove or --active-active), and bucket created/removed
// events become bucket create/delete tasks run behind a barrier. Events
// matching the exclude filters, or self-inflicted events in active-active
// mode, are skipped.
func (mj *mirrorJob) watchMirrorEvents(ctx context.Context, events []EventInfo) {
	for _, event := range events {
		// It will change the expanded alias back to the alias
		// again, by replacing the sourceUrlFull with the sourceAlias.
		// This url will be used to mirror.
		sourceAlias, sourceURLFull, _ := mustExpandAlias(mj.sourceURL)

		// If the passed source URL points to fs, fetch the absolute src path
		// to correctly calculate targetPath
		if sourceAlias == "" {
			tmpSrcURL, e := filepath.Abs(sourceURLFull)
			if e == nil {
				sourceURLFull = tmpSrcURL
			}
		}
		eventPath := event.Path
		if runtime.GOOS == "darwin" {
			// Strip the prefixes in the event path. Happens in darwin OS only
			eventPath = eventPath[strings.Index(eventPath, sourceURLFull):]
		} else if runtime.GOOS == "windows" {
			// Shared folder as source URL and if event path is an absolute path.
			eventPath = getEventPathURLWin(mj.sourceURL, eventPath)
		}

		sourceURL := newClientURL(eventPath)

		// build target path, it is the relative of the eventPath with the sourceUrl
		// joined to the targetURL.
		sourceSuffix := strings.TrimPrefix(eventPath, sourceURLFull)
		// Skip the object, if it matches the Exclude options provided
		if matchExcludeOptions(mj.opts.excludeOptions, sourceSuffix, sourceURL.Type) {
			continue
		}
		// Skip the bucket, if it matches the Exclude options provided
		if matchExcludeBucketOptions(mj.opts.excludeBuckets, sourceSuffix) {
			continue
		}

		// Skip objects whose storage class is in --exclude-storageclass.
		sc, ok := event.UserMetadata["x-amz-storage-class"]
		if ok {
			var found bool
			for _, esc := range mj.opts.excludeStorageClasses {
				if esc == sc {
					found = true
					break
				}
			}
			if found {
				continue
			}
		}

		targetPath := urlJoinPath(mj.targetURL, sourceSuffix)

		// newClient needs the unexpanded  path, newCLientURL needs the expanded path
		targetAlias, expandedTargetPath, _ := mustExpandAlias(targetPath)
		targetURL := newClientURL(expandedTargetPath)
		tgtSSE := getSSE(targetPath, mj.opts.encKeyDB[targetAlias])

		if strings.HasPrefix(string(event.Type), "s3:ObjectCreated:") {
			sourceModTime, _ := time.Parse(time.RFC3339Nano, event.Time)
			mirrorURL := URLs{
				SourceAlias: sourceAlias,
				SourceContent: &ClientContent{
					URL:              *sourceURL,
					RetentionEnabled: event.Type == notification.EventType("s3:ObjectCreated:PutRetention"),
					LegalHoldEnabled: event.Type == notification.EventType("s3:ObjectCreated:PutLegalHold"),
					Size:             event.Size,
					Time:             sourceModTime,
					Metadata:         event.UserMetadata,
				},
				TargetAlias:      targetAlias,
				TargetContent:    &ClientContent{URL: *targetURL},
				MD5:              mj.opts.md5,
				DisableMultipart: mj.opts.disableMultipart,
				encKeyDB:         mj.opts.encKeyDB,
			}
			if mj.opts.activeActive &&
				event.Type != notification.ObjectCreatedCopy &&
				event.Type != notification.ObjectCreatedCompleteMultipartUpload &&
				(getSourceModTimeKey(mirrorURL.SourceContent.Metadata) != "" ||
					getSourceModTimeKey(mirrorURL.SourceContent.UserMetadata) != "") {
				// If source has active-active attributes, it means that the
				// object was uploaded by "mc mirror", hence ignore the event
				// to avoid copying it.
				continue
			}
			mj.parallel.queueTask(func() URLs {
				return mj.doMirrorWatch(ctx, targetPath, tgtSSE, mirrorURL)
			}, mirrorURL.SourceContent.Size)
		} else if event.Type == notification.ObjectRemovedDelete {
			if targetAlias != "" && strings.Contains(event.UserAgent, uaMirrorAppName+":"+targetAlias) {
				// Ignore delete cascading delete events if cyclical.
				continue
			}
			mirrorURL := URLs{
				SourceAlias:      sourceAlias,
				SourceContent:    nil,
				TargetAlias:      targetAlias,
				TargetContent:    &ClientContent{URL: *targetURL},
				MD5:              mj.opts.md5,
				DisableMultipart: mj.opts.disableMultipart,
				encKeyDB:         mj.opts.encKeyDB,
			}
			mirrorURL.TotalCount = mj.status.GetCounts()
			mirrorURL.TotalSize = mj.status.Get()
			if mirrorURL.TargetContent != nil && (mj.opts.isRemove || mj.opts.activeActive) {
				mj.parallel.queueTask(func() URLs {
					return mj.doRemove(ctx, mirrorURL)
				}, 0)
			}
		} else if event.Type == notification.BucketCreatedAll {
			mirrorURL := URLs{
				SourceAlias:   sourceAlias,
				SourceContent: &ClientContent{URL: *sourceURL},
				TargetAlias:   targetAlias,
				TargetContent: &ClientContent{URL: *targetURL},
			}
			// Bucket operations run behind a barrier so queued object tasks
			// are not interleaved with bucket creation.
			mj.parallel.queueTaskWithBarrier(func() URLs {
				return mj.doCreateBucket(ctx, mirrorURL)
			}, 0)
		} else if event.Type == notification.BucketRemovedAll && mj.opts.isRemove {
			mirrorURL := URLs{
				TargetAlias:   targetAlias,
				TargetContent: &ClientContent{URL: *targetURL},
			}
			mj.parallel.queueTaskWithBarrier(func() URLs {
				return mj.doDeleteBucket(ctx, mirrorURL)
			}, 0)
		}

	}
}
   739  
   740  // this goroutine will watch for notifications, and add modified objects to the queue
   741  func (mj *mirrorJob) watchMirror(ctx context.Context) {
   742  	defer mj.watcher.Stop()
   743  
   744  	for {
   745  		select {
   746  		case events, ok := <-mj.watcher.Events():
   747  			if !ok {
   748  				return
   749  			}
   750  			mj.watchMirrorEvents(ctx, events)
   751  		case err, ok := <-mj.watcher.Errors():
   752  			if !ok {
   753  				return
   754  			}
   755  			switch err.ToGoError().(type) {
   756  			case APINotImplemented:
   757  				errorIf(err.Trace(),
   758  					"Unable to Watch on source, perhaps source doesn't support Watching for events")
   759  				return
   760  			}
   761  			if err != nil {
   762  				mj.parallel.queueTask(func() URLs {
   763  					return URLs{Error: err}
   764  				}, 0)
   765  			}
   766  		case <-ctx.Done():
   767  			return
   768  		}
   769  	}
   770  }
   771  
// watchURL registers sourceClient with the job's watcher. The final `true`
// argument is forwarded to Watcher.Join — presumably enabling recursive
// watching; confirm against the Watcher implementation.
func (mj *mirrorJob) watchURL(ctx context.Context, sourceClient Client) *probe.Error {
	return mj.watcher.Join(ctx, sourceClient, true)
}
   775  
   776  // Fetch urls that need to be mirrored
   777  func (mj *mirrorJob) startMirror(ctx context.Context) {
   778  	URLsCh := prepareMirrorURLs(ctx, mj.sourceURL, mj.targetURL, mj.opts)
   779  
   780  	for {
   781  		select {
   782  		case sURLs, ok := <-URLsCh:
   783  			if !ok {
   784  				return
   785  			}
   786  			if sURLs.Error != nil {
   787  				mj.statusCh <- sURLs
   788  				continue
   789  			}
   790  
   791  			if sURLs.SourceContent != nil {
   792  				if isOlder(sURLs.SourceContent.Time, mj.opts.olderThan) {
   793  					continue
   794  				}
   795  				if isNewer(sURLs.SourceContent.Time, mj.opts.newerThan) {
   796  					continue
   797  				}
   798  			}
   799  
   800  			if sURLs.SourceContent != nil {
   801  				mj.status.Add(sURLs.SourceContent.Size)
   802  			}
   803  
   804  			mj.status.SetTotal(mj.status.Get()).Update()
   805  			mj.status.AddCounts(1)
   806  
   807  			// Save total count.
   808  			sURLs.TotalCount = mj.status.GetCounts()
   809  			// Save totalSize.
   810  			sURLs.TotalSize = mj.status.Get()
   811  
   812  			if sURLs.SourceContent != nil {
   813  				mj.parallel.queueTask(func() URLs {
   814  					return mj.doMirror(ctx, sURLs)
   815  				}, sURLs.SourceContent.Size)
   816  			} else if sURLs.TargetContent != nil && mj.opts.isRemove {
   817  				mj.parallel.queueTask(func() URLs {
   818  					return mj.doRemove(ctx, sURLs)
   819  				}, 0)
   820  			}
   821  		case <-ctx.Done():
   822  			return
   823  		case <-mj.stopCh:
   824  			return
   825  		}
   826  	}
   827  }
   828  
   829  // when using a struct for copying, we could save a lot of passing of variables
   830  func (mj *mirrorJob) mirror(ctx context.Context) bool {
   831  	var wg sync.WaitGroup
   832  	ctx, cancel := context.WithCancel(ctx)
   833  
   834  	// Starts watcher loop for watching for new events.
   835  	if mj.opts.isWatch {
   836  		wg.Add(1)
   837  		go func() {
   838  			defer wg.Done()
   839  			mj.watchMirror(ctx)
   840  		}()
   841  	}
   842  
   843  	// Start mirroring.
   844  	wg.Add(1)
   845  	go func() {
   846  		defer wg.Done()
   847  		// startMirror locks and blocks itself.
   848  		mj.startMirror(ctx)
   849  	}()
   850  
   851  	// Close statusCh when both watch & mirror quits
   852  	go func() {
   853  		wg.Wait()
   854  		mj.parallel.stopAndWait()
   855  		close(mj.statusCh)
   856  	}()
   857  
   858  	return mj.monitorMirrorStatus(cancel)
   859  }
   860  
   861  func newMirrorJob(srcURL, dstURL string, opts mirrorOptions) *mirrorJob {
   862  	mj := mirrorJob{
   863  		stopCh: make(chan struct{}),
   864  
   865  		sourceURL: srcURL,
   866  		targetURL: dstURL,
   867  		opts:      opts,
   868  		statusCh:  make(chan URLs),
   869  		watcher:   NewWatcher(UTCNow()),
   870  	}
   871  
   872  	mj.parallel = newParallelManager(mj.statusCh)
   873  
   874  	// we'll define the status to use here,
   875  	// do we want the quiet status? or the progressbar
   876  	if globalQuiet {
   877  		mj.status = NewQuietStatus(mj.parallel)
   878  	} else if globalJSON {
   879  		mj.status = NewQuietStatus(mj.parallel)
   880  	} else {
   881  		mj.status = NewProgressStatus(mj.parallel)
   882  	}
   883  
   884  	return &mj
   885  }
   886  
   887  // copyBucketPolicies - copy policies from source to dest
   888  func copyBucketPolicies(ctx context.Context, srcClt, dstClt Client, isOverwrite bool) *probe.Error {
   889  	rules, err := srcClt.GetAccessRules(ctx)
   890  	if err != nil {
   891  		switch err.ToGoError().(type) {
   892  		case APINotImplemented:
   893  			return nil
   894  		}
   895  		return err
   896  	}
   897  	// Set found rules to target bucket if permitted
   898  	for _, r := range rules {
   899  		originalRule, _, err := dstClt.GetAccess(ctx)
   900  		if err != nil {
   901  			return err
   902  		}
   903  		// Set rule only if it doesn't exist in the target bucket
   904  		// or force flag is activated
   905  		if originalRule == "none" || isOverwrite {
   906  			err = dstClt.SetAccess(ctx, r, false)
   907  			if err != nil {
   908  				return err
   909  			}
   910  		}
   911  	}
   912  	return nil
   913  }
   914  
   915  func getEventPathURLWin(srcURL, eventPath string) string {
   916  	// A rename or move or sometimes even write event sets eventPath as an absolute filepath.
   917  	// If the watch folder is a shared folder the write events show the entire event path,
   918  	// from which we need to deduce the correct path relative to the source URL
   919  	var eventRelPath, lastPathPrefix string
   920  	var lastPathPrefixPos int
   921  	sourceURLpathList := strings.Split(srcURL, slashSeperator)
   922  	lenSrcURLSlice := len(sourceURLpathList)
   923  	shdModifyEventPath := filepath.IsAbs(eventPath) && !filepath.IsAbs(srcURL) && lenSrcURLSlice > 1
   924  
   925  	if shdModifyEventPath {
   926  		lastPathPrefix = sourceURLpathList[lenSrcURLSlice-1]
   927  		lastPathPrefixPos = strings.Index(eventPath, lastPathPrefix)
   928  	}
   929  	canModifyEventPath := shdModifyEventPath && lastPathPrefix != "" && lastPathPrefixPos > 0
   930  	canModifyEventPath = canModifyEventPath && lastPathPrefixPos+len(lastPathPrefix) < len(eventPath)
   931  	if canModifyEventPath {
   932  		eventRelPath = filepath.ToSlash(eventPath[lastPathPrefixPos+len(lastPathPrefix):])
   933  		eventPath = srcURL + eventRelPath
   934  	}
   935  	return eventPath
   936  }
   937  
// runMirror - mirrors all buckets to another S3 server
//
// It translates CLI flags into mirrorOptions, optionally synchronizes the
// bucket lists between source and target (creating missing buckets on the
// target and, with --remove, deleting target-only buckets), optionally
// attaches a change watcher to the source, and finally runs the mirror job.
// The returned bool is true when an error was detected; in active-active
// mode it also signals the caller (mainMirror) to retry the whole mirror.
func runMirror(ctx context.Context, srcURL, dstURL string, cli *cli.Context, encKeyDB map[string][]prefixSSEPair) bool {
	// Parse user metadata from --attr, e.g. "key1=value1;key2=value2".
	userMetadata := make(map[string]string)
	if cli.String("attr") != "" {
		var err *probe.Error
		userMetadata, err = getMetaDataEntry(cli.String("attr"))
		fatalIf(err, "Unable to parse attribute %v", cli.String("attr"))
	}

	srcClt, err := newClient(srcURL)
	fatalIf(err, "Unable to initialize `"+srcURL+"`.")

	dstClt, err := newClient(dstURL)
	fatalIf(err, "Unable to initialize `"+dstURL+"`.")

	// This is kept for backward compatibility, `--force` means --overwrite.
	isOverwrite := cli.Bool("force")
	if !isOverwrite {
		isOverwrite = cli.Bool("overwrite")
	}

	// Any of the three flags below puts the job into continuous watch mode.
	isWatch := cli.Bool("watch") || cli.Bool("multi-master") || cli.Bool("active-active")
	isRemove := cli.Bool("remove")

	// preserve is also expected to be overwritten if necessary.
	// Metadata is preserved when -a is given, when user metadata was
	// supplied, or implicitly in watch mode.
	isMetadata := cli.Bool("a") || isWatch || len(userMetadata) > 0
	isFake := cli.Bool("fake") || cli.Bool("dry-run")

	mopts := mirrorOptions{
		isFake:                isFake,
		isRemove:              isRemove,
		isOverwrite:           isOverwrite,
		isWatch:               isWatch,
		isMetadata:            isMetadata,
		isSummary:             cli.Bool("summary"),
		isRetriable:           cli.Bool("retry"),
		md5:                   cli.Bool("md5"),
		disableMultipart:      cli.Bool("disable-multipart"),
		skipErrors:            cli.Bool("skip-errors"),
		excludeOptions:        cli.StringSlice("exclude"),
		excludeBuckets:        cli.StringSlice("exclude-bucket"),
		excludeStorageClasses: cli.StringSlice("exclude-storageclass"),
		olderThan:             cli.String("older-than"),
		newerThan:             cli.String("newer-than"),
		storageClass:          cli.String("storage-class"),
		userMetadata:          userMetadata,
		encKeyDB:              encKeyDB,
		activeActive:          isWatch,
	}

	// Create a new mirror job and execute it
	mj := newMirrorJob(srcURL, dstURL, mopts)

	preserve := cli.Bool("preserve")

	// A URL pointing at the root of an object store (alias only, path is
	// just the separator) means we operate at bucket granularity on that
	// side.
	createDstBuckets := dstClt.GetURL().Type == objectStorage && dstClt.GetURL().Path == string(dstClt.GetURL().Separator)
	mirrorSrcBuckets := srcClt.GetURL().Type == objectStorage && srcClt.GetURL().Path == string(srcClt.GetURL().Separator)
	mirrorBucketsToBuckets := mirrorSrcBuckets && createDstBuckets

	if mirrorSrcBuckets || createDstBuckets {
		// Synchronize buckets using dirDifference function
		for d := range bucketDifference(ctx, srcClt, dstClt) {
			if d.Error != nil {
				if mj.opts.activeActive {
					// In active-active mode ask the caller to retry
					// instead of exiting.
					errorIf(d.Error, "Failed to start mirroring.. retrying")
					return true
				}
				mj.status.fatalIf(d.Error, "Failed to start mirroring.")
			}

			if d.Diff == differInSecond {
				// Bucket exists only on the target; remove it when
				// --remove is set (and this is not a dry run).
				diffBucket := strings.TrimPrefix(d.SecondURL, dstClt.GetURL().String())
				if !isFake && isRemove {
					aliasedDstBucket := path.Join(dstURL, diffBucket)
					err := deleteBucket(ctx, aliasedDstBucket, false)
					mj.status.fatalIf(err, "Failed to start mirroring.")
				}
				continue
			}

			sourceSuffix := strings.TrimPrefix(d.FirstURL, srcClt.GetURL().String())

			newSrcURL := path.Join(srcURL, sourceSuffix)
			newTgtURL := path.Join(dstURL, sourceSuffix)

			newSrcClt, _ := newClient(newSrcURL)
			newDstClt, _ := newClient(newTgtURL)

			// Bucket exists only on the source: mirror the bucket itself.
			if d.Diff == differInFirst {
				var (
					withLock bool
					mode     minio.RetentionMode
					validity uint64
					unit     minio.ValidityUnit
					err      *probe.Error
				)
				if preserve && mirrorBucketsToBuckets {
					// A successful lookup means the source bucket has
					// object locking; create the target with lock too.
					_, mode, validity, unit, err = newSrcClt.GetObjectLockConfig(ctx)
					if err == nil {
						withLock = true
					}
				}

				mj.status.PrintMsg(mirrorMessage{
					Source: newSrcURL,
					Target: newTgtURL,
				})

				if mj.opts.isFake {
					continue
				}

				// Skip create bucket, if it matches the Exclude options provided
				if matchExcludeBucketOptions(mopts.excludeBuckets, sourceSuffix) {
					continue
				}

				// Bucket only exists in the source, create the same bucket in the destination
				if err := newDstClt.MakeBucket(ctx, cli.String("region"), false, withLock); err != nil {
					errorIf(err, "Unable to create bucket at `"+newTgtURL+"`.")
					continue
				}
				if preserve && mirrorBucketsToBuckets {
					// object lock configuration set on bucket
					if mode != "" {
						err = newDstClt.SetObjectLockConfig(ctx, mode, validity, unit)
						errorIf(err, "Unable to set object lock config in `"+newTgtURL+"`.")
						if err != nil && mj.opts.activeActive {
							return true
						}
						if err == nil {
							// NOTE(review): md5 is forced on here —
							// presumably because locked buckets require
							// content-MD5 on uploads; confirm.
							mj.opts.md5 = true
						}
					}
					// Best-effort copy of bucket policies; failures are
					// reported but do not abort the mirror.
					errorIf(copyBucketPolicies(ctx, newSrcClt, newDstClt, isOverwrite),
						"Unable to copy bucket policies to `"+newDstClt.GetURL().String()+"`.")
				}
			}
		}
	}

	if mj.opts.isWatch {
		// monitor mode will watch the source folders for changes,
		// and queue them for copying.
		if err := mj.watchURL(ctx, srcClt); err != nil {
			if mj.opts.activeActive {
				errorIf(err, "Failed to start monitoring.. retrying")
				return true
			}
			mj.status.fatalIf(err, "Failed to start monitoring.")
		}
	}

	return mj.mirror(ctx)
}
  1094  
  1095  // Main entry point for mirror command.
  1096  func mainMirror(cliCtx *cli.Context) error {
  1097  	// Additional command specific theme customization.
  1098  	console.SetColor("Mirror", color.New(color.FgGreen, color.Bold))
  1099  
  1100  	ctx, cancelMirror := context.WithCancel(globalContext)
  1101  	defer cancelMirror()
  1102  
  1103  	encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx)
  1104  	fatalIf(err, "Unable to parse encryption keys.")
  1105  
  1106  	// check 'mirror' cli arguments.
  1107  	srcURL, tgtURL := checkMirrorSyntax(ctx, cliCtx, encKeyDB)
  1108  
  1109  	if prometheusAddress := cliCtx.String("monitoring-address"); prometheusAddress != "" {
  1110  		http.Handle("/metrics", promhttp.Handler())
  1111  		go func() {
  1112  			if e := http.ListenAndServe(prometheusAddress, nil); e != nil {
  1113  				fatalIf(probe.NewError(e), "Unable to setup monitoring endpoint.")
  1114  			}
  1115  		}()
  1116  	}
  1117  
  1118  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
  1119  	for {
  1120  		select {
  1121  		case <-ctx.Done():
  1122  			return exitStatus(globalErrorExitStatus)
  1123  		default:
  1124  			errorDetected := runMirror(ctx, srcURL, tgtURL, cliCtx, encKeyDB)
  1125  			if cliCtx.Bool("watch") || cliCtx.Bool("multi-master") || cliCtx.Bool("active-active") {
  1126  				mirrorRestarts.Inc()
  1127  				time.Sleep(time.Duration(r.Float64() * float64(2*time.Second)))
  1128  				continue
  1129  			}
  1130  			if errorDetected {
  1131  				return exitStatus(globalErrorExitStatus)
  1132  			}
  1133  			return nil
  1134  		}
  1135  	}
  1136  }