github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/fs/accounting/stats.go

     1  package accounting
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"sort"
     9  	"strings"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/rclone/rclone/fs"
    14  	"github.com/rclone/rclone/fs/fserrors"
    15  	"github.com/rclone/rclone/fs/rc"
    16  	"github.com/rclone/rclone/lib/terminal"
    17  )
    18  
    19  const (
    20  	averagePeriodLength = time.Second
    21  	averageStopAfter    = time.Minute
    22  )
    23  
    24  // MaxCompletedTransfers specifies the maximum number of completed transfers kept in the startedTransfers list
    25  var MaxCompletedTransfers = 100
    26  
    27  // StatsInfo accounts for all transfers
    28  // N.B.: if this struct is modified, please remember to also update the sum()
    29  // function in stats_groups.go so that the new fields are counted correctly
    30  type StatsInfo struct {
    31  	mu                  sync.RWMutex
    32  	ctx                 context.Context
    33  	ci                  *fs.ConfigInfo
    34  	bytes               int64
    35  	errors              int64
    36  	lastError           error
    37  	fatalError          bool
    38  	retryError          bool
    39  	retryAfter          time.Time
    40  	checks              int64
    41  	checking            *transferMap
    42  	checkQueue          int
    43  	checkQueueSize      int64
    44  	transfers           int64
    45  	transferring        *transferMap
    46  	transferQueue       int
    47  	transferQueueSize   int64
    48  	renames             int64
    49  	renameQueue         int
    50  	renameQueueSize     int64
    51  	deletes             int64
    52  	deletesSize         int64
    53  	deletedDirs         int64
    54  	inProgress          *inProgress
    55  	startedTransfers    []*Transfer   // all started transfers, including completed ones not yet pruned
    56  	oldTimeRanges       timeRanges    // a merged list of time ranges for the transfers
    57  	oldDuration         time.Duration // duration of transfers we have culled
    58  	group               string
    59  	startTime           time.Time // the moment these stats were initialized or reset
    60  	average             averageValues
    61  	serverSideCopies    int64
    62  	serverSideCopyBytes int64
    63  	serverSideMoves     int64
    64  	serverSideMoveBytes int64
    65  }
    66  
    67  type averageValues struct {
    68  	mu        sync.Mutex
    69  	lpBytes   int64
    70  	lpTime    time.Time
    71  	speed     float64
    72  	stop      chan bool
    73  	stopped   sync.WaitGroup
    74  	startOnce sync.Once
    75  	stopOnce  sync.Once
    76  }
    77  
    78  // NewStats creates an initialised StatsInfo
    79  func NewStats(ctx context.Context) *StatsInfo {
    80  	ci := fs.GetConfig(ctx)
    81  	return &StatsInfo{
    82  		ctx:          ctx,
    83  		ci:           ci,
    84  		checking:     newTransferMap(ci.Checkers, "checking"),
    85  		transferring: newTransferMap(ci.Transfers, "transferring"),
    86  		inProgress:   newInProgress(ctx),
    87  		startTime:    time.Now(),
    88  		average:      averageValues{stop: make(chan bool)},
    89  	}
    90  }
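// Illustrative sketch (not part of the rclone source): a minimal lifecycle for
// a StatsInfo value, assuming a plain background context. Bytes and Error feed
// the counters that String and RemoteStats later report.
func exampleStatsLifecycle() {
	ctx := context.Background()
	stats := NewStats(ctx)
	stats.Bytes(1024)                          // account 1 KiB transferred
	_ = stats.Error(errors.New("example err")) // count an error and remember it as lastError
	fmt.Println(stats.String())                // human readable summary
	if out, err := stats.RemoteStats(); err == nil {
		fmt.Println(out["bytes"], out["errors"]) // the same numbers as rc.Params
	}
}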
    91  
    92  // RemoteStats returns stats for rc
    93  func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
    94  	// NB if adding values here - make sure you update the docs in
    95  	// stats_groups.go
    96  
    97  	out = make(rc.Params)
    98  
    99  	ts := s.calculateTransferStats()
   100  	out["totalChecks"] = ts.totalChecks
   101  	out["totalTransfers"] = ts.totalTransfers
   102  	out["totalBytes"] = ts.totalBytes
   103  	out["transferTime"] = ts.transferTime
   104  	out["speed"] = ts.speed
   105  
   106  	s.mu.RLock()
   107  	out["bytes"] = s.bytes
   108  	out["errors"] = s.errors
   109  	out["fatalError"] = s.fatalError
   110  	out["retryError"] = s.retryError
   111  	out["checks"] = s.checks
   112  	out["transfers"] = s.transfers
   113  	out["deletes"] = s.deletes
   114  	out["deletedDirs"] = s.deletedDirs
   115  	out["renames"] = s.renames
   116  	out["elapsedTime"] = time.Since(s.startTime).Seconds()
   117  	out["serverSideCopies"] = s.serverSideCopies
   118  	out["serverSideCopyBytes"] = s.serverSideCopyBytes
   119  	out["serverSideMoves"] = s.serverSideMoves
   120  	out["serverSideMoveBytes"] = s.serverSideMoveBytes
   121  	eta, etaOK := eta(s.bytes, ts.totalBytes, ts.speed)
   122  	if etaOK {
   123  		out["eta"] = eta.Seconds()
   124  	} else {
   125  		out["eta"] = nil
   126  	}
   127  	s.mu.RUnlock()
   128  
   129  	if !s.checking.empty() {
   130  		out["checking"] = s.checking.remotes()
   131  	}
   132  	if !s.transferring.empty() {
   133  		out["transferring"] = s.transferring.rcStats(s.inProgress)
   134  	}
   135  	if s.errors > 0 {
   136  		out["lastError"] = s.lastError.Error()
   137  	}
   138  
   139  	return out, nil
   140  }
   141  
   142  // _speed returns the average speed of the transfer in bytes/second
   143  //
   144  // Call with lock held
   145  func (s *StatsInfo) _speed() float64 {
   146  	return s.average.speed
   147  }
   148  
   149  // timeRange is a start and end time of a transfer
   150  type timeRange struct {
   151  	start time.Time
   152  	end   time.Time
   153  }
   154  
   155  // timeRanges is a list of non-overlapping start and end times for
   156  // transfers
   157  type timeRanges []timeRange
   158  
   159  // merge all the overlapping time ranges
   160  func (trs *timeRanges) merge() {
   161  	Trs := *trs
   162  
   163  	// Sort by the starting time.
   164  	sort.Slice(Trs, func(i, j int) bool {
   165  		return Trs[i].start.Before(Trs[j].start)
   166  	})
   167  
   168  	// Merge overlapping ranges and append the distinct ones
   169  	var (
   170  		newTrs = Trs[:0]
   171  		i, j   = 0, 1
   172  	)
   173  	for i < len(Trs) {
   174  		if j < len(Trs) {
   175  			if !Trs[i].end.Before(Trs[j].start) {
   176  				if Trs[i].end.Before(Trs[j].end) {
   177  					Trs[i].end = Trs[j].end
   178  				}
   179  				j++
   180  				continue
   181  			}
   182  		}
   183  		newTrs = append(newTrs, Trs[i])
   184  		i = j
   185  		j++
   186  	}
   187  
   188  	*trs = newTrs
   189  }
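// Illustrative sketch (not part of the rclone source): merge collapses
// overlapping ranges in place. Given a base time t0, [t0, t0+3s] and
// [t0+2s, t0+5s] become the single range [t0, t0+5s], while the disjoint
// [t0+10s, t0+11s] survives untouched.
func exampleMergeTimeRanges() timeRanges {
	t0 := time.Now()
	trs := timeRanges{
		{start: t0, end: t0.Add(3 * time.Second)},
		{start: t0.Add(2 * time.Second), end: t0.Add(5 * time.Second)},
		{start: t0.Add(10 * time.Second), end: t0.Add(11 * time.Second)},
	}
	trs.merge()
	// len(trs) == 2 and trs.total() == 6 * time.Second
	return trs
}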
   190  
   191  // cull removes any ranges whose start and end are at or before cutoff,
   192  // returning the sum of their durations
   193  func (trs *timeRanges) cull(cutoff time.Time) (d time.Duration) {
   194  	var newTrs = (*trs)[:0]
   195  	for _, tr := range *trs {
   196  		if cutoff.Before(tr.start) || cutoff.Before(tr.end) {
   197  			newTrs = append(newTrs, tr)
   198  		} else {
   199  			d += tr.end.Sub(tr.start)
   200  		}
   201  	}
   202  	*trs = newTrs
   203  	return d
   204  }
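// Illustrative sketch (not part of the rclone source): cull drops only the
// ranges that finished at or before the cutoff and returns their summed
// duration, so the caller can keep an aggregate (oldDuration) without
// retaining every old range.
func exampleCullTimeRanges() time.Duration {
	t0 := time.Now()
	trs := timeRanges{
		{start: t0, end: t0.Add(2 * time.Second)},                       // ends before cutoff: culled
		{start: t0.Add(5 * time.Second), end: t0.Add(9 * time.Second)},  // ends after cutoff: kept
	}
	culled := trs.cull(t0.Add(3 * time.Second))
	// culled == 2 * time.Second and len(trs) == 1
	return culled
}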
   205  
   206  // total returns the summed duration of the time ranges
   207  func (trs timeRanges) total() (total time.Duration) {
   208  	for _, tr := range trs {
   209  		total += tr.end.Sub(tr.start)
   210  	}
   211  	return total
   212  }
   213  
   214  // _totalDuration returns the total duration as the union of the durations
   215  // of all transfers belonging to this object.
   216  //
   217  // Call with lock held
   218  func (s *StatsInfo) _totalDuration() time.Duration {
   219  	// copy of s.oldTimeRanges with extra room for the current transfers
   220  	timeRanges := make(timeRanges, len(s.oldTimeRanges), len(s.oldTimeRanges)+len(s.startedTransfers))
   221  	copy(timeRanges, s.oldTimeRanges)
   222  
   223  	// Extract time ranges of all transfers.
   224  	now := time.Now()
   225  	for i := range s.startedTransfers {
   226  		start, end := s.startedTransfers[i].TimeRange()
   227  		if end.IsZero() {
   228  			end = now
   229  		}
   230  		timeRanges = append(timeRanges, timeRange{start, end})
   231  	}
   232  
   233  	timeRanges.merge()
   234  	return s.oldDuration + timeRanges.total()
   235  }
   236  
   237  const (
   238  	etaMaxSeconds = (1<<63 - 1) / int64(time.Second)           // Largest possible ETA as number of seconds
   239  	etaMax        = time.Duration(etaMaxSeconds) * time.Second // Largest possible ETA, which is in second precision, representing "292y24w3d23h47m16s"
   240  )
   241  
   242  // eta returns the ETA of the current operation,
   243  // rounded to full seconds.
   244  // If the ETA cannot be determined 'ok' returns false.
   245  func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
   246  	if total <= 0 || size < 0 || rate <= 0 {
   247  		return 0, false
   248  	}
   249  	remaining := total - size
   250  	if remaining < 0 {
   251  		return 0, false
   252  	}
   253  	seconds := int64(float64(remaining) / rate)
   254  	if seconds < 0 {
   255  		// Got Int64 overflow
   256  		eta = etaMax
   257  	} else if seconds >= etaMaxSeconds {
   258  		// Would get Int64 overflow if converting from seconds to Duration (nanoseconds)
   259  		eta = etaMax
   260  	} else {
   261  		eta = time.Duration(seconds) * time.Second
   262  	}
   263  	return eta, true
   264  }
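// Illustrative sketch (not part of the rclone source): with 25 of 100 bytes
// done at 5 bytes/s there are 75 bytes left, so eta reports 15s; a zero or
// negative rate makes the ETA unknowable and ok comes back false.
func exampleETA() {
	if d, ok := eta(25, 100, 5); ok {
		fmt.Println(d) // 15s
	}
	if _, ok := eta(25, 100, 0); !ok {
		fmt.Println("ETA unknown") // rate <= 0
	}
}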
   265  
   266  // etaString returns the ETA of the current operation,
   267  // rounded to full seconds.
   268  // If the ETA cannot be determined it returns "-"
   269  func etaString(done, total int64, rate float64) string {
   270  	d, ok := eta(done, total, rate)
   271  	if !ok {
   272  		return "-"
   273  	}
   274  	if d == etaMax {
   275  		return "-"
   276  	}
   277  	return fs.Duration(d).ShortReadableString()
   278  }
   279  
   280  // percent returns a/b as a percentage rounded to the nearest integer
   281  // as a string
   282  //
   283  // if the percentage is invalid it returns "-"
   284  func percent(a int64, b int64) string {
   285  	if a < 0 || b <= 0 {
   286  		return "-"
   287  	}
   288  	return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
   289  }
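// Illustrative sketch (not part of the rclone source): percent rounds to the
// nearest whole percent, so one third reports "33%" and two thirds "67%",
// while a non-positive total reports "-".
func examplePercent() {
	fmt.Println(percent(1, 3), percent(2, 3), percent(5, 0)) // 33% 67% -
}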
   290  
   291  // transferStats is returned from calculateTransferStats
   292  type transferStats struct {
   293  	totalChecks    int64
   294  	totalTransfers int64
   295  	totalBytes     int64
   296  	transferTime   float64
   297  	speed          float64
   298  }
   299  
   300  // calculateTransferStats calculates some additional transfer stats not
   301  // stored directly in StatsInfo
   302  func (s *StatsInfo) calculateTransferStats() (ts transferStats) {
   303  	// checking and transferring have their own locking so read
   304  	// here before lock to prevent deadlock on GetBytes
   305  	transferring, checking := s.transferring.count(), s.checking.count()
   306  	transferringBytesDone, transferringBytesTotal := s.transferring.progress(s)
   307  
   308  	s.mu.RLock()
   309  	defer s.mu.RUnlock()
   310  
   311  	ts.totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
   312  	ts.totalTransfers = int64(s.transferQueue) + s.transfers + int64(transferring)
   313  	// note that s.bytes already includes transferringBytesDone so
   314  	// we take it off here to avoid double counting
   315  	ts.totalBytes = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
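	// Worked illustration of the adjustment above (not from the rclone
	// source): with 10 GiB still queued, 3 GiB of finished transfers and a
	// 4 GiB transfer that is 1 GiB in, s.bytes is 3+1 = 4 GiB, so
	//
	//	totalBytes = 10 + 4 + 4 - 1 = 17 GiB
	//
	// which is the expected 10 (queued) + 3 (done) + 4 (in flight) without
	// counting the in-flight gigabyte twice.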
   316  	ts.speed = s.average.speed
   317  	dt := s._totalDuration()
   318  	ts.transferTime = dt.Seconds()
   319  
   320  	return ts
   321  }
   322  
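// averageLoop recalculates the moving average transfer speed once per
// averagePeriodLength tick. Each tick the instantaneous rate avg (bytes
// accumulated since the previous tick divided by the elapsed time) is folded
// into the running estimate as
//
//	speed = (avg + speed*(period-1)) / period
//
// period grows by one per tick until it reaches averagePeriod, so the first
// ticks form a plain cumulative mean and later ticks an exponentially
// weighted average with decay factor (period-1)/period per tick.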
   323  func (s *StatsInfo) averageLoop() {
   324  	var period float64
   325  
   326  	ticker := time.NewTicker(averagePeriodLength)
   327  	defer ticker.Stop()
   328  
   329  	startTime := time.Now()
   330  	a := &s.average
   331  	defer a.stopped.Done()
   332  	for {
   333  		select {
   334  		case now := <-ticker.C:
   335  			a.mu.Lock()
   336  			var elapsed float64
   337  			if a.lpTime.IsZero() {
   338  				elapsed = now.Sub(startTime).Seconds()
   339  			} else {
   340  				elapsed = now.Sub(a.lpTime).Seconds()
   341  			}
   342  			avg := 0.0
   343  			if elapsed > 0 {
   344  				avg = float64(a.lpBytes) / elapsed
   345  			}
   346  			if period < averagePeriod {
   347  				period++
   348  			}
   349  			a.speed = (avg + a.speed*(period-1)) / period
   350  			a.lpBytes = 0
   351  			a.lpTime = now
   352  			a.mu.Unlock()
   353  		case <-a.stop:
   354  			return
   355  		}
   356  	}
   357  }
   358  
   359  // Start the average loop
   360  func (s *StatsInfo) startAverageLoop() {
   361  	s.mu.RLock()
   362  	defer s.mu.RUnlock()
   363  	s.average.startOnce.Do(func() {
   364  		s.average.stopped.Add(1)
   365  		go s.averageLoop()
   366  	})
   367  }
   368  
   369  // Stop the average loop
   370  //
   371  // Call with the mutex held
   372  func (s *StatsInfo) _stopAverageLoop() {
   373  	s.average.stopOnce.Do(func() {
   374  		close(s.average.stop)
   375  		s.average.stopped.Wait()
   376  	})
   377  }
   378  
   379  // Stop the average loop
   380  func (s *StatsInfo) stopAverageLoop() {
   381  	s.mu.RLock()
   382  	defer s.mu.RUnlock()
   383  	s._stopAverageLoop()
   384  }
   385  
   386  // String converts the StatsInfo to a string for printing
   387  func (s *StatsInfo) String() string {
   388  	// NB if adding more stats in here, remember to add them into
   389  	// RemoteStats() too.
   390  
   391  	ts := s.calculateTransferStats()
   392  
   393  	s.mu.RLock()
   394  
   395  	var (
   396  		buf                    = &bytes.Buffer{}
   397  		xfrchkString           = ""
   398  		dateString             = ""
   399  		elapsedTime            = time.Since(s.startTime)
   400  		elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute
   401  		displaySpeedString     string
   402  	)
   403  
   404  	if s.ci.DataRateUnit == "bits" {
   405  		displaySpeedString = fs.SizeSuffix(ts.speed * 8).BitRateUnit()
   406  	} else {
   407  		displaySpeedString = fs.SizeSuffix(ts.speed).ByteRateUnit()
   408  	}
   409  
   410  	if !s.ci.StatsOneLine {
   411  		_, _ = fmt.Fprintf(buf, "\nTransferred:   	")
   412  	} else {
   413  		xfrchk := []string{}
   414  		if ts.totalTransfers > 0 && s.transferQueue > 0 {
   415  			xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, ts.totalTransfers))
   416  		}
   417  		if ts.totalChecks > 0 && s.checkQueue > 0 {
   418  			xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, ts.totalChecks))
   419  		}
   420  		if len(xfrchk) > 0 {
   421  			xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
   422  		}
   423  		if s.ci.StatsOneLineDate {
   424  			t := time.Now()
   425  			dateString = t.Format(s.ci.StatsOneLineDateFormat) // Including the separator so people can customize it
   426  		}
   427  	}
   428  
   429  	_, _ = fmt.Fprintf(buf, "%s%13s / %s, %s, %s, ETA %s%s",
   430  		dateString,
   431  		fs.SizeSuffix(s.bytes).ByteUnit(),
   432  		fs.SizeSuffix(ts.totalBytes).ByteUnit(),
   433  		percent(s.bytes, ts.totalBytes),
   434  		displaySpeedString,
   435  		etaString(s.bytes, ts.totalBytes, ts.speed),
   436  		xfrchkString,
   437  	)
   438  
   439  	if s.ci.ProgressTerminalTitle {
   440  		// Writes ETA to the terminal title
   441  		terminal.WriteTerminalTitle("ETA: " + etaString(s.bytes, ts.totalBytes, ts.speed))
   442  	}
   443  
   444  	if !s.ci.StatsOneLine {
   445  		_, _ = buf.WriteRune('\n')
   446  		errorDetails := ""
   447  		switch {
   448  		case s.fatalError:
   449  			errorDetails = " (fatal error encountered)"
   450  		case s.retryError:
   451  			errorDetails = " (retrying may help)"
   452  		case s.errors != 0:
   453  			errorDetails = " (no need to retry)"
   454  
   455  		}
   456  
   457  		// Add only non-zero stats
   458  		if s.errors != 0 {
   459  			_, _ = fmt.Fprintf(buf, "Errors:        %10d%s\n",
   460  				s.errors, errorDetails)
   461  		}
   462  		if s.checks != 0 || ts.totalChecks != 0 {
   463  			_, _ = fmt.Fprintf(buf, "Checks:        %10d / %d, %s\n",
   464  				s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks))
   465  		}
   466  		if s.deletes != 0 || s.deletedDirs != 0 {
   467  			_, _ = fmt.Fprintf(buf, "Deleted:       %10d (files), %d (dirs), %s (freed)\n", s.deletes, s.deletedDirs, fs.SizeSuffix(s.deletesSize).ByteUnit())
   468  		}
   469  		if s.renames != 0 {
   470  			_, _ = fmt.Fprintf(buf, "Renamed:       %10d\n", s.renames)
   471  		}
   472  		if s.transfers != 0 || ts.totalTransfers != 0 {
   473  			_, _ = fmt.Fprintf(buf, "Transferred:   %10d / %d, %s\n",
   474  				s.transfers, ts.totalTransfers, percent(s.transfers, ts.totalTransfers))
   475  		}
   476  		if s.serverSideCopies != 0 || s.serverSideCopyBytes != 0 {
   477  			_, _ = fmt.Fprintf(buf, "Server Side Copies:%6d @ %s\n",
   478  				s.serverSideCopies, fs.SizeSuffix(s.serverSideCopyBytes).ByteUnit(),
   479  			)
   480  		}
   481  		if s.serverSideMoves != 0 || s.serverSideMoveBytes != 0 {
   482  			_, _ = fmt.Fprintf(buf, "Server Side Moves:%7d @ %s\n",
   483  				s.serverSideMoves, fs.SizeSuffix(s.serverSideMoveBytes).ByteUnit(),
   484  			)
   485  		}
   486  		_, _ = fmt.Fprintf(buf, "Elapsed time:  %10ss\n", strings.TrimRight(fs.Duration(elapsedTime.Truncate(time.Minute)).ReadableString(), "0s")+fmt.Sprintf("%.1f", elapsedTimeSecondsOnly.Seconds()))
   487  	}
   488  
   489  	// checking and transferring have their own locking so unlock
   490  	// here to prevent deadlock on GetBytes
   491  	s.mu.RUnlock()
   492  
   493  	// Add per transfer stats if required
   494  	if !s.ci.StatsOneLine {
   495  		if !s.checking.empty() {
   496  			_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring))
   497  		}
   498  		if !s.transferring.empty() {
   499  			_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.ctx, s.inProgress, nil))
   500  		}
   501  	}
   502  
   503  	return buf.String()
   504  }
   505  
   506  // Transferred returns a list of all completed transfers including checked
   507  // and failed ones.
   508  func (s *StatsInfo) Transferred() []TransferSnapshot {
   509  	s.mu.RLock()
   510  	defer s.mu.RUnlock()
   511  	ts := make([]TransferSnapshot, 0, len(s.startedTransfers))
   512  
   513  	for _, tr := range s.startedTransfers {
   514  		if tr.IsDone() {
   515  			ts = append(ts, tr.Snapshot())
   516  		}
   517  	}
   518  
   519  	return ts
   520  }
   521  
   522  // Log outputs the StatsInfo to the log
   523  func (s *StatsInfo) Log() {
   524  	if s.ci.UseJSONLog {
   525  		out, _ := s.RemoteStats()
   526  		fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v%v\n", s, fs.LogValueHide("stats", out))
   527  	} else {
   528  		fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v\n", s)
   529  	}
   530  
   531  }
   532  
   533  // Bytes updates the stats for bytes bytes
   534  func (s *StatsInfo) Bytes(bytes int64) {
   535  	s.average.mu.Lock()
   536  	s.average.lpBytes += bytes
   537  	s.average.mu.Unlock()
   538  
   539  	s.mu.Lock()
   540  	defer s.mu.Unlock()
   541  	s.bytes += bytes
   542  }
   543  
   544  // BytesNoNetwork updates the stats for bytes bytes but doesn't include them in the average transfer speed
   545  func (s *StatsInfo) BytesNoNetwork(bytes int64) {
   546  	s.mu.Lock()
   547  	defer s.mu.Unlock()
   548  	s.bytes += bytes
   549  }
   550  
   551  // GetBytes returns the number of bytes transferred so far
   552  func (s *StatsInfo) GetBytes() int64 {
   553  	s.mu.RLock()
   554  	defer s.mu.RUnlock()
   555  	return s.bytes
   556  }
   557  
   558  // GetBytesWithPending returns the number of bytes transferred so far plus the bytes still outstanding in started transfers
   559  func (s *StatsInfo) GetBytesWithPending() int64 {
   560  	s.mu.RLock()
   561  	defer s.mu.RUnlock()
   562  	pending := int64(0)
   563  	for _, tr := range s.startedTransfers {
   564  		if tr.acc != nil {
   565  			bytes, size := tr.acc.progress()
   566  			if bytes < size {
   567  				pending += size - bytes
   568  			}
   569  		}
   570  	}
   571  	return s.bytes + pending
   572  }
   573  
   574  // Errors updates the stats for errors
   575  func (s *StatsInfo) Errors(errors int64) {
   576  	s.mu.Lock()
   577  	defer s.mu.Unlock()
   578  	s.errors += errors
   579  }
   580  
   581  // GetErrors reads the number of errors
   582  func (s *StatsInfo) GetErrors() int64 {
   583  	s.mu.RLock()
   584  	defer s.mu.RUnlock()
   585  	return s.errors
   586  }
   587  
   588  // GetLastError returns the lastError
   589  func (s *StatsInfo) GetLastError() error {
   590  	s.mu.RLock()
   591  	defer s.mu.RUnlock()
   592  	return s.lastError
   593  }
   594  
   595  // GetChecks returns the number of checks
   596  func (s *StatsInfo) GetChecks() int64 {
   597  	s.mu.RLock()
   598  	defer s.mu.RUnlock()
   599  	return s.checks
   600  }
   601  
   602  // FatalError sets the fatalError flag
   603  func (s *StatsInfo) FatalError() {
   604  	s.mu.Lock()
   605  	defer s.mu.Unlock()
   606  	s.fatalError = true
   607  }
   608  
   609  // HadFatalError returns whether there has been at least one FatalError
   610  func (s *StatsInfo) HadFatalError() bool {
   611  	s.mu.RLock()
   612  	defer s.mu.RUnlock()
   613  	return s.fatalError
   614  }
   615  
   616  // RetryError sets the retryError flag
   617  func (s *StatsInfo) RetryError() {
   618  	s.mu.Lock()
   619  	defer s.mu.Unlock()
   620  	s.retryError = true
   621  }
   622  
   623  // HadRetryError returns whether there has been at least one non-NoRetryError
   624  func (s *StatsInfo) HadRetryError() bool {
   625  	s.mu.RLock()
   626  	defer s.mu.RUnlock()
   627  	return s.retryError
   628  }
   629  
   630  var (
   631  	errMaxDelete     = fserrors.FatalError(errors.New("--max-delete threshold reached"))
   632  	errMaxDeleteSize = fserrors.FatalError(errors.New("--max-delete-size threshold reached"))
   633  )
   634  
   635  // DeleteFile updates the stats for deleting a file
   636  //
   637  // It may return fatal errors if the --max-delete or --max-delete-size
   638  // thresholds have been reached.
   639  func (s *StatsInfo) DeleteFile(ctx context.Context, size int64) error {
   640  	ci := fs.GetConfig(ctx)
   641  	s.mu.Lock()
   642  	defer s.mu.Unlock()
   643  	if size < 0 {
   644  		size = 0
   645  	}
   646  	if ci.MaxDelete >= 0 && s.deletes+1 > ci.MaxDelete {
   647  		return errMaxDelete
   648  	}
   649  	if ci.MaxDeleteSize >= 0 && s.deletesSize+size > int64(ci.MaxDeleteSize) {
   650  		return errMaxDeleteSize
   651  	}
   652  	s.deletes++
   653  	s.deletesSize += size
   654  	return nil
   655  }
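// Illustrative sketch (not part of the rclone source): callers treat a
// non-nil return from DeleteFile as fatal, since errMaxDelete and
// errMaxDeleteSize are wrapped with fserrors.FatalError; on success the
// delete counters have already been bumped.
func exampleDeleteFile(ctx context.Context, s *StatsInfo, size int64) error {
	if err := s.DeleteFile(ctx, size); err != nil {
		return err // --max-delete or --max-delete-size threshold reached
	}
	// s.GetDeletes() now includes this file
	return nil
}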
   656  
   657  // GetDeletes returns the number of deletes
   658  func (s *StatsInfo) GetDeletes() int64 {
   659  	s.mu.Lock()
   660  	defer s.mu.Unlock()
   661  	return s.deletes
   662  }
   663  
   664  // DeletedDirs updates the stats for deletedDirs
   665  func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64 {
   666  	s.mu.Lock()
   667  	defer s.mu.Unlock()
   668  	s.deletedDirs += deletedDirs
   669  	return s.deletedDirs
   670  }
   671  
   672  // Renames updates the stats for renames
   673  func (s *StatsInfo) Renames(renames int64) int64 {
   674  	s.mu.Lock()
   675  	defer s.mu.Unlock()
   676  	s.renames += renames
   677  	return s.renames
   678  }
   679  
   680  // ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames) to 0 and resets lastError, fatalError and retryError
   681  func (s *StatsInfo) ResetCounters() {
   682  	s.mu.Lock()
   683  	defer s.mu.Unlock()
   684  	s.bytes = 0
   685  	s.errors = 0
   686  	s.lastError = nil
   687  	s.fatalError = false
   688  	s.retryError = false
   689  	s.retryAfter = time.Time{}
   690  	s.checks = 0
   691  	s.transfers = 0
   692  	s.deletes = 0
   693  	s.deletesSize = 0
   694  	s.deletedDirs = 0
   695  	s.renames = 0
   696  	s.startedTransfers = nil
   697  	s.oldDuration = 0
   698  
   699  	s._stopAverageLoop()
   700  	s.average = averageValues{stop: make(chan bool)}
   701  }
   702  
   703  // ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
   704  func (s *StatsInfo) ResetErrors() {
   705  	s.mu.Lock()
   706  	defer s.mu.Unlock()
   707  	s.errors = 0
   708  	s.lastError = nil
   709  	s.fatalError = false
   710  	s.retryError = false
   711  	s.retryAfter = time.Time{}
   712  }
   713  
   714  // Errored returns whether there have been any errors
   715  func (s *StatsInfo) Errored() bool {
   716  	s.mu.RLock()
   717  	defer s.mu.RUnlock()
   718  	return s.errors != 0
   719  }
   720  
   721  // Error adds a single error into the stats, assigns lastError and, where appropriate, sets fatalError or retryError
   722  func (s *StatsInfo) Error(err error) error {
   723  	if err == nil || fserrors.IsCounted(err) {
   724  		return err
   725  	}
   726  	s.mu.Lock()
   727  	defer s.mu.Unlock()
   728  	s.errors++
   729  	s.lastError = err
   730  	err = fserrors.FsError(err)
   731  	fserrors.Count(err)
   732  	switch {
   733  	case fserrors.IsFatalError(err):
   734  		s.fatalError = true
   735  	case fserrors.IsRetryAfterError(err):
   736  		retryAfter := fserrors.RetryAfterErrorTime(err)
   737  		if s.retryAfter.IsZero() || retryAfter.Sub(s.retryAfter) > 0 {
   738  			s.retryAfter = retryAfter
   739  		}
   740  		s.retryError = true
   741  	case !fserrors.IsNoRetryError(err):
   742  		s.retryError = true
   743  	}
   744  	return err
   745  }
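// Illustrative sketch (not part of the rclone source): Error both counts and
// classifies. A plain error only marks retryError, while one wrapped with
// fserrors.FatalError also sets the fatal flag, which HadFatalError reports.
func exampleErrorClassification(s *StatsInfo) {
	_ = s.Error(errors.New("temporary failure"))              // errors == 1, retryError == true
	_ = s.Error(fserrors.FatalError(errors.New("disk full"))) // errors == 2, fatalError == true
	_ = s.HadFatalError()                                     // true
}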
   746  
   747  // RetryAfter returns the time to retry after if it is set. It will
   748  // be the zero time if it isn't set.
   749  func (s *StatsInfo) RetryAfter() time.Time {
   750  	s.mu.Lock()
   751  	defer s.mu.Unlock()
   752  	return s.retryAfter
   753  }
   754  
   755  // NewCheckingTransfer adds a checking transfer to the stats, from the object.
   756  func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer {
   757  	tr := newCheckingTransfer(s, obj, what)
   758  	s.checking.add(tr)
   759  	return tr
   760  }
   761  
   762  // DoneChecking removes a check from the stats
   763  func (s *StatsInfo) DoneChecking(remote string) {
   764  	s.checking.del(remote)
   765  	s.mu.Lock()
   766  	s.checks++
   767  	s.mu.Unlock()
   768  }
   769  
   770  // GetTransfers reads the number of transfers
   771  func (s *StatsInfo) GetTransfers() int64 {
   772  	s.mu.RLock()
   773  	defer s.mu.RUnlock()
   774  	return s.transfers
   775  }
   776  
   777  // NewTransfer adds a transfer to the stats from the object.
   778  //
   779  // The srcFs is derived from obj where possible; the dstFs must be supplied
   780  func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
   781  	var srcFs fs.Fs
   782  	if oi, ok := obj.(fs.ObjectInfo); ok {
   783  		if f, ok := oi.Fs().(fs.Fs); ok {
   784  			srcFs = f
   785  		}
   786  	}
   787  	tr := newTransfer(s, obj, srcFs, dstFs)
   788  	s.transferring.add(tr)
   789  	s.startAverageLoop()
   790  	return tr
   791  }
   792  
   793  // NewTransferRemoteSize adds a transfer to the stats based on remote and size.
   794  func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
   795  	tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
   796  	s.transferring.add(tr)
   797  	s.startAverageLoop()
   798  	return tr
   799  }
   800  
   801  // DoneTransferring removes a transfer from the stats
   802  //
   803  // If ok is true and the remote was still in the transfer map (guarding against double counting from nested calls, #6213) then the transfers count is incremented
   804  func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
   805  	existed := s.transferring.del(remote)
   806  	if ok && existed {
   807  		s.mu.Lock()
   808  		s.transfers++
   809  		s.mu.Unlock()
   810  	}
   811  	if s.transferring.empty() && s.checking.empty() {
   812  		time.AfterFunc(averageStopAfter, s.stopAverageLoop)
   813  	}
   814  }
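// Illustrative sketch (not part of the rclone source): the usual pairing of
// the helpers above. A check is opened with NewCheckingTransfer and closed
// with DoneChecking; a transfer is opened with NewTransfer (which also starts
// the average speed loop) and closed with DoneTransferring, which only bumps
// the transfer count when ok is true and the remote really was being
// transferred.
func exampleTransferAccounting(s *StatsInfo, obj fs.DirEntry, dstFs fs.Fs) {
	_ = s.NewCheckingTransfer(obj, "checking") // the returned *Transfer is normally retained by the caller
	// ... compare the objects ...
	s.DoneChecking(obj.Remote())

	_ = s.NewTransfer(obj, dstFs)
	// ... copy the data, accounting bytes with s.Bytes ...
	s.DoneTransferring(obj.Remote(), true)
}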
   815  
   816  // SetCheckQueue sets the number of queued checks
   817  func (s *StatsInfo) SetCheckQueue(n int, size int64) {
   818  	s.mu.Lock()
   819  	s.checkQueue = n
   820  	s.checkQueueSize = size
   821  	s.mu.Unlock()
   822  }
   823  
   824  // SetTransferQueue sets the number of queued transfers
   825  func (s *StatsInfo) SetTransferQueue(n int, size int64) {
   826  	s.mu.Lock()
   827  	s.transferQueue = n
   828  	s.transferQueueSize = size
   829  	s.mu.Unlock()
   830  }
   831  
   832  // SetRenameQueue sets the number of queued renames
   833  func (s *StatsInfo) SetRenameQueue(n int, size int64) {
   834  	s.mu.Lock()
   835  	s.renameQueue = n
   836  	s.renameQueueSize = size
   837  	s.mu.Unlock()
   838  }
   839  
   840  // AddTransfer adds a reference to the started transfer.
   841  func (s *StatsInfo) AddTransfer(transfer *Transfer) {
   842  	s.mu.Lock()
   843  	s.startedTransfers = append(s.startedTransfers, transfer)
   844  	s.mu.Unlock()
   845  }
   846  
   847  // _removeTransfer removes a reference to the started transfer in
   848  // position i.
   849  //
   850  // Must be called with the lock held
   851  func (s *StatsInfo) _removeTransfer(transfer *Transfer, i int) {
   852  	now := time.Now()
   853  
   854  	// add finished transfer onto old time ranges
   855  	start, end := transfer.TimeRange()
   856  	if end.IsZero() {
   857  		end = now
   858  	}
   859  	s.oldTimeRanges = append(s.oldTimeRanges, timeRange{start, end})
   860  	s.oldTimeRanges.merge()
   861  
   862  	// remove the found entry
   863  	s.startedTransfers = append(s.startedTransfers[:i], s.startedTransfers[i+1:]...)
   864  
   865  	// Find the earliest start time among the remaining transfers
   866  	oldestStart := now
   867  	for i := range s.startedTransfers {
   868  		start, _ := s.startedTransfers[i].TimeRange()
   869  		if start.Before(oldestStart) {
   870  			oldestStart = start
   871  		}
   872  	}
   873  
   874  	// cull any old ranges that finished at or before that, adding their duration to oldDuration
   875  	s.oldDuration += s.oldTimeRanges.cull(oldestStart)
   876  }
   877  
   878  // RemoveTransfer removes a reference to the started transfer.
   879  func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
   880  	s.mu.Lock()
   881  	for i, tr := range s.startedTransfers {
   882  		if tr == transfer {
   883  			s._removeTransfer(tr, i)
   884  			break
   885  		}
   886  	}
   887  	s.mu.Unlock()
   888  }
   889  
   890  // PruneTransfers makes sure there aren't too many old transfers by removing
   891  // a single finished transfer.
   892  func (s *StatsInfo) PruneTransfers() {
   893  	if MaxCompletedTransfers < 0 {
   894  		return
   895  	}
   896  	s.mu.Lock()
   897  	// remove a transfer from the start if we are over quota
   898  	if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
   899  		for i, tr := range s.startedTransfers {
   900  			if tr.IsDone() {
   901  				s._removeTransfer(tr, i)
   902  				break
   903  			}
   904  		}
   905  	}
   906  	s.mu.Unlock()
   907  }
   908  
   909  // AddServerSideMove counts a server side move
   910  func (s *StatsInfo) AddServerSideMove(n int64) {
   911  	s.mu.Lock()
   912  	s.serverSideMoves += 1
   913  	s.serverSideMoveBytes += n
   914  	s.mu.Unlock()
   915  }
   916  
   917  // AddServerSideCopy counts a server side copy
   918  func (s *StatsInfo) AddServerSideCopy(n int64) {
   919  	s.mu.Lock()
   920  	s.serverSideCopies += 1
   921  	s.serverSideCopyBytes += n
   922  	s.mu.Unlock()
   923  }