github.com/artpar/rclone@v1.67.3/backend/cache/cache.go

     1  //go:build !plan9 && !js
     2  
     3  // Package cache implements a virtual provider to cache existing remotes.
     4  package cache
     5  
     6  import (
     7  	"context"
     8  	"errors"
     9  	"fmt"
    10  	"io"
    11  	"math"
    12  	"os"
    13  	"os/signal"
    14  	"path"
    15  	"path/filepath"
    16  	"sort"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"syscall"
    21  	"time"
    22  
    23  	"github.com/artpar/rclone/backend/crypt"
    24  	"github.com/artpar/rclone/fs"
    25  	"github.com/artpar/rclone/fs/cache"
    26  	"github.com/artpar/rclone/fs/config"
    27  	"github.com/artpar/rclone/fs/config/configmap"
    28  	"github.com/artpar/rclone/fs/config/configstruct"
    29  	"github.com/artpar/rclone/fs/config/obscure"
    30  	"github.com/artpar/rclone/fs/fspath"
    31  	"github.com/artpar/rclone/fs/hash"
    32  	"github.com/artpar/rclone/fs/rc"
    33  	"github.com/artpar/rclone/fs/walk"
    34  	"github.com/artpar/rclone/lib/atexit"
    35  	"golang.org/x/time/rate"
    36  )
    37  
    38  const (
    39  	// DefCacheChunkSize is the default value for chunk size
    40  	DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
    41  	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
    42  	DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
    43  	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
    44  	DefCacheChunkCleanInterval = fs.Duration(time.Minute)
    45  	// DefCacheInfoAge is the default value for object info age
    46  	DefCacheInfoAge = fs.Duration(6 * time.Hour)
    47  	// DefCacheReadRetries is the default value for read retries
    48  	DefCacheReadRetries = 10
    49  	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
    50  	DefCacheTotalWorkers = 4
    51  	// DefCacheChunkNoMemory will enable or disable in-memory storage for chunks
    52  	DefCacheChunkNoMemory = false
    53  	// DefCacheRps limits the number of requests per second to the source FS
    54  	DefCacheRps = -1
    55  	// DefCacheWrites will cache file data on writes through the cache
    56  	DefCacheWrites = false
	// DefCacheTmpWaitTime says how long files should be stored in the local cache before being uploaded
    58  	DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
    59  	// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
    60  	DefCacheDbWaitTime = fs.Duration(1 * time.Second)
    61  )
    62  
    63  // Register with Fs
    64  func init() {
    65  	fs.Register(&fs.RegInfo{
    66  		Name:        "cache",
    67  		Description: "Cache a remote",
    68  		NewFs:       NewFs,
    69  		CommandHelp: commandHelp,
    70  		Options: []fs.Option{{
    71  			Name:     "remote",
    72  			Help:     "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
    73  			Required: true,
    74  		}, {
    75  			Name: "plex_url",
    76  			Help: "The URL of the Plex server.",
    77  		}, {
    78  			Name:      "plex_username",
    79  			Help:      "The username of the Plex user.",
    80  			Sensitive: true,
    81  		}, {
    82  			Name:       "plex_password",
    83  			Help:       "The password of the Plex user.",
    84  			IsPassword: true,
    85  		}, {
    86  			Name:      "plex_token",
    87  			Help:      "The plex token for authentication - auto set normally.",
    88  			Hide:      fs.OptionHideBoth,
    89  			Advanced:  true,
    90  			Sensitive: true,
    91  		}, {
    92  			Name:     "plex_insecure",
    93  			Help:     "Skip all certificate verification when connecting to the Plex server.",
    94  			Advanced: true,
    95  		}, {
    96  			Name: "chunk_size",
    97  			Help: `The size of a chunk (partial file data).
    98  
    99  Use lower numbers for slower connections. If the chunk size is
   100  changed, any downloaded chunks will be invalid and cache-chunk-path
   101  will need to be cleared or unexpected EOF errors will occur.`,
   102  			Default: DefCacheChunkSize,
   103  			Examples: []fs.OptionExample{{
   104  				Value: "1M",
   105  				Help:  "1 MiB",
   106  			}, {
   107  				Value: "5M",
   108  				Help:  "5 MiB",
   109  			}, {
   110  				Value: "10M",
   111  				Help:  "10 MiB",
   112  			}},
   113  		}, {
   114  			Name: "info_age",
   115  			Help: `How long to cache file structure information (directory listings, file size, times, etc.). 
   116  If all write operations are done through the cache then you can safely make
   117  this value very large as the cache store will also be updated in real time.`,
   118  			Default: DefCacheInfoAge,
   119  			Examples: []fs.OptionExample{{
   120  				Value: "1h",
   121  				Help:  "1 hour",
   122  			}, {
   123  				Value: "24h",
   124  				Help:  "24 hours",
   125  			}, {
   126  				Value: "48h",
   127  				Help:  "48 hours",
   128  			}},
   129  		}, {
   130  			Name: "chunk_total_size",
   131  			Help: `The total size that the chunks can take up on the local disk.
   132  
   133  If the cache exceeds this value then it will start to delete the
   134  oldest chunks until it goes under this value.`,
   135  			Default: DefCacheTotalChunkSize,
   136  			Examples: []fs.OptionExample{{
   137  				Value: "500M",
   138  				Help:  "500 MiB",
   139  			}, {
   140  				Value: "1G",
   141  				Help:  "1 GiB",
   142  			}, {
   143  				Value: "10G",
   144  				Help:  "10 GiB",
   145  			}},
   146  		}, {
   147  			Name:     "db_path",
   148  			Default:  filepath.Join(config.GetCacheDir(), "cache-backend"),
   149  			Help:     "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
   150  			Advanced: true,
   151  		}, {
   152  			Name:    "chunk_path",
   153  			Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
   154  			Help: `Directory to cache chunk files.
   155  
   156  Path to where partial file data (chunks) are stored locally. The remote
   157  name is appended to the final path.
   158  
   159  This config follows the "--cache-db-path". If you specify a custom
   160  location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
   161  then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
   162  			Advanced: true,
   163  		}, {
   164  			Name:     "db_purge",
   165  			Default:  false,
   166  			Help:     "Clear all the cached data for this remote on start.",
   167  			Hide:     fs.OptionHideConfigurator,
   168  			Advanced: true,
   169  		}, {
   170  			Name:    "chunk_clean_interval",
   171  			Default: DefCacheChunkCleanInterval,
   172  			Help: `How often should the cache perform cleanups of the chunk storage.
   173  
   174  The default value should be ok for most people. If you find that the
   175  cache goes over "cache-chunk-total-size" too often then try to lower
   176  this value to force it to perform cleanups more often.`,
   177  			Advanced: true,
   178  		}, {
   179  			Name:    "read_retries",
   180  			Default: DefCacheReadRetries,
			Help: `How many times to retry a read from the cache storage.

Since reading from a cache stream is independent from downloading file
data, readers can get to a point where there's no more data in the
cache.  Most of the time this can indicate a connectivity issue if the
cache isn't able to provide file data anymore.

For really slow connections, increase this to a point where the stream
is able to provide data, but your experience will be very stuttery.`,
   190  			Advanced: true,
   191  		}, {
   192  			Name:    "workers",
   193  			Default: DefCacheTotalWorkers,
			Help: `How many workers should run in parallel to download chunks.

Higher values will mean more parallel processing (more CPU needed)
and more concurrent requests on the cloud provider.  This impacts
several aspects, like the cloud provider API limits and the stress on
the hardware that rclone runs on, but it also means that streams will
be more fluid and data will be available to readers much faster.

**Note**: If the optional Plex integration is enabled then this
setting will adapt to the type of reading performed and the value
specified here will be used as a maximum number of workers to use.`,
   205  			Advanced: true,
   206  		}, {
   207  			Name:    "chunk_no_memory",
   208  			Default: DefCacheChunkNoMemory,
   209  			Help: `Disable the in-memory cache for storing chunks during streaming.
   210  
   211  By default, cache will keep file data during streaming in RAM as well
   212  to provide it to readers as fast as possible.
   213  
   214  This transient data is evicted as soon as it is read and the number of
   215  chunks stored doesn't exceed the number of workers. However, depending
   216  on other settings like "cache-chunk-size" and "cache-workers" this footprint
   217  can increase if there are parallel streams too (multiple files being read
   218  at the same time).
   219  
If the hardware permits it, keep this feature enabled for overall
better streaming performance; it can be disabled if RAM is not
available on the local machine.`,
   223  			Advanced: true,
   224  		}, {
   225  			Name:    "rps",
   226  			Default: int(DefCacheRps),
   227  			Help: `Limits the number of requests per second to the source FS (-1 to disable).
   228  
This setting places a hard limit on the number of requests per second
that cache will make to the cloud provider remote and tries to
respect that value by adding waits between reads.
   232  
   233  If you find that you're getting banned or limited on the cloud
   234  provider through cache and know that a smaller number of requests per
   235  second will allow you to work with it then you can use this setting
   236  for that.
   237  
A good balance of all the other settings should make this setting
unnecessary, but it is available for more special cases.
   240  
   241  **NOTE**: This will limit the number of requests during streams but
   242  other API calls to the cloud provider like directory listings will
   243  still pass.`,
   244  			Advanced: true,
   245  		}, {
   246  			Name:    "writes",
   247  			Default: DefCacheWrites,
   248  			Help: `Cache file data on writes through the FS.
   249  
   250  If you need to read files immediately after you upload them through
   251  cache you can enable this flag to have their data stored in the
   252  cache store at the same time during upload.`,
   253  			Advanced: true,
   254  		}, {
   255  			Name:    "tmp_upload_path",
   256  			Default: "",
   257  			Help: `Directory to keep temporary files until they are uploaded.
   258  
This is the path that cache will use as temporary storage for new
files that need to be uploaded to the cloud provider.

Specifying a value will enable this feature. Without it, it is
completely disabled and files will be uploaded directly to the cloud
provider.`,
   265  			Advanced: true,
   266  		}, {
   267  			Name:    "tmp_wait_time",
   268  			Default: DefCacheTmpWaitTime,
   269  			Help: `How long should files be stored in local cache before being uploaded.
   270  
   271  This is the duration that a file must wait in the temporary location
   272  _cache-tmp-upload-path_ before it is selected for upload.
   273  
   274  Note that only one file is uploaded at a time and it can take longer
to start the upload if a queue has formed for this purpose.`,
   276  			Advanced: true,
   277  		}, {
   278  			Name:    "db_wait_time",
   279  			Default: DefCacheDbWaitTime,
   280  			Help: `How long to wait for the DB to be available - 0 is unlimited.
   281  
   282  Only one process can have the DB open at any one time, so rclone waits
   283  for this duration for the DB to become available before it gives an
   284  error.
   285  
   286  If you set it to 0 then it will wait forever.`,
   287  			Advanced: true,
   288  		}},
   289  	})
   290  }
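
// An example cache remote wrapping another remote might look like this
// in rclone.conf (illustrative names and values, not taken from this file):
//
//	[cached]
//	type = cache
//	remote = mydrive:media
//	chunk_size = 10M
//	info_age = 24h
//	chunk_total_size = 10G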
   291  
   292  // Options defines the configuration for this backend
   293  type Options struct {
   294  	Remote             string        `config:"remote"`
   295  	PlexURL            string        `config:"plex_url"`
   296  	PlexUsername       string        `config:"plex_username"`
   297  	PlexPassword       string        `config:"plex_password"`
   298  	PlexToken          string        `config:"plex_token"`
   299  	PlexInsecure       bool          `config:"plex_insecure"`
   300  	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
   301  	InfoAge            fs.Duration   `config:"info_age"`
   302  	ChunkTotalSize     fs.SizeSuffix `config:"chunk_total_size"`
   303  	DbPath             string        `config:"db_path"`
   304  	ChunkPath          string        `config:"chunk_path"`
   305  	DbPurge            bool          `config:"db_purge"`
   306  	ChunkCleanInterval fs.Duration   `config:"chunk_clean_interval"`
   307  	ReadRetries        int           `config:"read_retries"`
   308  	TotalWorkers       int           `config:"workers"`
   309  	ChunkNoMemory      bool          `config:"chunk_no_memory"`
   310  	Rps                int           `config:"rps"`
   311  	StoreWrites        bool          `config:"writes"`
   312  	TempWritePath      string        `config:"tmp_upload_path"`
   313  	TempWaitTime       fs.Duration   `config:"tmp_wait_time"`
   314  	DbWaitTime         fs.Duration   `config:"db_wait_time"`
   315  }
   316  
   317  // Fs represents a wrapped fs.Fs
   318  type Fs struct {
   319  	fs.Fs
   320  	wrapper fs.Fs
   321  
   322  	name     string
   323  	root     string
   324  	opt      Options      // parsed options
   325  	features *fs.Features // optional features
   326  	cache    *Persistent
   327  	tempFs   fs.Fs
   328  
   329  	lastChunkCleanup time.Time
   330  	cleanupMu        sync.Mutex
   331  	rateLimiter      *rate.Limiter
   332  	plexConnector    *plexConnector
   333  	backgroundRunner *backgroundWriter
   334  	cleanupChan      chan bool
   335  	parentsForgetFn  []func(string, fs.EntryType)
   336  	notifiedRemotes  map[string]bool
   337  	notifiedMu       sync.Mutex
   338  	parentsForgetMu  sync.Mutex
   339  }
   340  
// parseRootPath returns the root path cleaned of surrounding slashes; the error return is currently always nil
   342  func parseRootPath(path string) (string, error) {
   343  	return strings.Trim(path, "/"), nil
   344  }
   345  
   346  var warnDeprecated sync.Once
   347  
   348  // NewFs constructs an Fs from the path, container:path
   349  func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
   350  	warnDeprecated.Do(func() {
   351  		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
   352  	})
   353  
   354  	// Parse config into Options struct
   355  	opt := new(Options)
   356  	err := configstruct.Set(m, opt)
   357  	if err != nil {
   358  		return nil, err
   359  	}
   360  	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
   361  		return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
   362  			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
   363  	}
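	// e.g. with the default chunk_size=5M and workers=4 the minimum
	// accepted chunk_total_size is 4 * 5 MiB = 20 MiB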
   364  
   365  	if strings.HasPrefix(opt.Remote, name+":") {
   366  		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
   367  	}
   368  
   369  	rpath, err := parseRootPath(rootPath)
   370  	if err != nil {
   371  		return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
   372  	}
   373  
   374  	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
   375  	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
   376  	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
   377  		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
   378  	}
   379  	var fsErr error
   380  	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
   381  	if wrapErr == fs.ErrorIsFile {
   382  		fsErr = fs.ErrorIsFile
   383  		rpath = cleanPath(path.Dir(rpath))
   384  	}
   385  	// configure cache backend
   386  	if opt.DbPurge {
   387  		fs.Debugf(name, "Purging the DB")
   388  	}
   389  	f := &Fs{
   390  		Fs:               wrappedFs,
   391  		name:             name,
   392  		root:             rpath,
   393  		opt:              *opt,
   394  		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
   395  		cleanupChan:      make(chan bool, 1),
   396  		notifiedRemotes:  make(map[string]bool),
   397  	}
   398  	cache.PinUntilFinalized(f.Fs, f)
   399  	rps := rate.Inf
   400  	if opt.Rps > 0 {
   401  		rps = rate.Limit(float64(opt.Rps))
   402  	}
   403  	f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
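	// e.g. rps=10 with workers=4 creates a limiter that admits 10
	// requests/s with a burst of up to 4 (one per worker)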
   404  
   405  	f.plexConnector = &plexConnector{}
   406  	if opt.PlexURL != "" {
   407  		if opt.PlexToken != "" {
   408  			f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
   409  			if err != nil {
   410  				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
   411  			}
   412  		} else {
   413  			if opt.PlexPassword != "" && opt.PlexUsername != "" {
   414  				decPass, err := obscure.Reveal(opt.PlexPassword)
   415  				if err != nil {
   416  					decPass = opt.PlexPassword
   417  				}
   418  				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
   419  					m.Set("plex_token", token)
   420  				})
   421  				if err != nil {
   422  					return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
   423  				}
   424  			}
   425  		}
   426  	}
   427  
   428  	dbPath := f.opt.DbPath
   429  	chunkPath := f.opt.ChunkPath
	// if dbPath is non-default but chunkPath is default, overwrite chunkPath to follow dbPath
   431  	if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
   432  		chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
   433  		chunkPath = dbPath
   434  	}
   435  	if filepath.Ext(dbPath) != "" {
   436  		dbPath = filepath.Dir(dbPath)
   437  	}
   438  	if filepath.Ext(chunkPath) != "" {
   439  		chunkPath = filepath.Dir(chunkPath)
   440  	}
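	// e.g. a db_path of "/var/cache/rclone/cache.db" (hypothetical) is
	// reduced to the directory "/var/cache/rclone"; the DB file name
	// itself is derived from the remote name below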
   441  	err = os.MkdirAll(dbPath, os.ModePerm)
   442  	if err != nil {
   443  		return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
   444  	}
   445  	err = os.MkdirAll(chunkPath, os.ModePerm)
   446  	if err != nil {
   447  		return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
   448  	}
   449  
   450  	dbPath = filepath.Join(dbPath, name+".db")
   451  	chunkPath = filepath.Join(chunkPath, name)
   452  	fs.Infof(name, "Cache DB path: %v", dbPath)
   453  	fs.Infof(name, "Cache chunk path: %v", chunkPath)
   454  	f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
   455  		PurgeDb:    opt.DbPurge,
   456  		DbWaitTime: time.Duration(opt.DbWaitTime),
   457  	})
   458  	if err != nil {
   459  		return nil, fmt.Errorf("failed to start cache db: %w", err)
   460  	}
	// Clean up at exit and trap SIGHUP to flush the dir cache
   462  	c := make(chan os.Signal, 1)
   463  	signal.Notify(c, syscall.SIGHUP)
   464  	atexit.Register(func() {
   465  		if opt.PlexURL != "" {
   466  			f.plexConnector.closeWebsocket()
   467  		}
   468  		f.StopBackgroundRunners()
   469  	})
   470  	go func() {
   471  		for {
   472  			s := <-c
   473  			if s == syscall.SIGHUP {
   474  				fs.Infof(f, "Clearing cache from signal")
   475  				f.DirCacheFlush()
   476  			}
   477  		}
   478  	}()
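	// e.g. "kill -HUP <rclone pid>" drops all cached directory listings
	// for this remote while it is running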
   479  
   480  	fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
   481  	fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
   482  	fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
   483  	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
   484  	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
   485  	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
   486  	if f.opt.StoreWrites {
   487  		fs.Infof(name, "Cache Writes: enabled")
   488  	}
   489  
   490  	if f.opt.TempWritePath != "" {
   491  		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
   492  		if err != nil {
   493  			return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
   494  		}
   495  		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
   496  		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
   497  		if err != nil {
   498  			return nil, fmt.Errorf("failed to create temp fs: %w", err)
   499  		}
   500  		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
   501  		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
   502  		f.backgroundRunner, _ = initBackgroundUploader(f)
   503  		go f.backgroundRunner.run()
   504  	}
   505  
   506  	go func() {
   507  		for {
   508  			time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
   509  			select {
   510  			case <-f.cleanupChan:
   511  				fs.Infof(f, "stopping cleanup")
   512  				return
   513  			default:
   514  				fs.Debugf(f, "starting cleanup")
   515  				f.CleanUpCache(false)
   516  			}
   517  		}
   518  	}()
   519  
   520  	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
   521  		pollInterval := make(chan time.Duration, 1)
   522  		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
   523  		doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
   524  	}
   525  
   526  	f.features = (&fs.Features{
   527  		CanHaveEmptyDirectories: true,
   528  		DuplicateFiles:          false, // storage doesn't permit this
   529  	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
	// override only those features that rely on the temp fs and that it doesn't support
   531  	//f.features.ChangeNotify = f.ChangeNotify
   532  	if f.opt.TempWritePath != "" {
   533  		if f.tempFs.Features().Move == nil {
   534  			f.features.Move = nil
   535  		}
   539  		if f.tempFs.Features().DirMove == nil {
   540  			f.features.DirMove = nil
   541  		}
   542  		if f.tempFs.Features().MergeDirs == nil {
   543  			f.features.MergeDirs = nil
   544  		}
   545  	}
   546  	// even if the wrapped fs doesn't support it, we still want it
   547  	f.features.DirCacheFlush = f.DirCacheFlush
   548  
   549  	rc.Add(rc.Call{
   550  		Path:  "cache/expire",
   551  		Fn:    f.httpExpireRemote,
   552  		Title: "Purge a remote from cache",
   553  		Help: `
   554  Purge a remote from the cache backend. Supports either a directory or a file.
   555  Params:
   556    - remote = path to remote (required)
   557    - withData = true/false to delete cached data (chunks) as well (optional)
   558  
   559  Eg
   560  
   561      rclone rc cache/expire remote=path/to/sub/folder/
   562      rclone rc cache/expire remote=/ withData=true 
   563  `,
   564  	})
   565  
   566  	rc.Add(rc.Call{
   567  		Path:  "cache/stats",
   568  		Fn:    f.httpStats,
   569  		Title: "Get cache stats",
   570  		Help: `
   571  Show statistics for the cache remote.
   572  `,
   573  	})
   574  
   575  	rc.Add(rc.Call{
   576  		Path:  "cache/fetch",
   577  		Fn:    f.rcFetch,
   578  		Title: "Fetch file chunks",
   579  		Help: `
   580  Ensure the specified file chunks are cached on disk.
   581  
   582  The chunks= parameter specifies the file chunks to check.
   583  It takes a comma separated list of array slice indices.
   584  The slice indices are similar to Python slices: start[:end]
   585  
start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is the 0 based chunk number from the beginning
of the file to fetch exclusive.
   589  Both values can be negative, in which case they count from the back
   590  of the file. The value "-5:" represents the last 5 chunks of a file.
   591  
   592  Some valid examples are:
   593  ":5,-5:" -> the first and last five chunks
   594  "0,-2" -> the first and the second last chunk
   595  "0:10" -> the first ten chunks
   596  
   597  Any parameter with a key that starts with "file" can be used to
   598  specify files to fetch, e.g.
   599  
   600      rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
   601  
File names will automatically be encrypted when a crypt remote
   603  is used on top of the cache.
   604  
   605  `,
   606  	})
   607  
   608  	return f, fsErr
   609  }
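
// The rc endpoints registered above can be exercised with "rclone rc",
// e.g. (paths are illustrative):
//
//	rclone rc cache/stats
//	rclone rc cache/expire remote=path/to/sub/folder/ withData=true
//	rclone rc cache/fetch chunks=0,-2: file=path/to/file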
   610  
   611  func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
   612  	out = make(rc.Params)
   613  	m, err := f.Stats()
   614  	if err != nil {
		return out, fmt.Errorf("error while getting cache stats: %w", err)
   616  	}
   617  	out["status"] = "ok"
   618  	out["stats"] = m
   619  	return out, nil
   620  }
   621  
   622  func (f *Fs) unwrapRemote(remote string) string {
   623  	remote = cleanPath(remote)
   624  	if remote != "" {
   625  		// if it's wrapped by crypt we need to check what format we got
   626  		if cryptFs, yes := f.isWrappedByCrypt(); yes {
   627  			_, err := cryptFs.DecryptFileName(remote)
   628  			// if it failed to decrypt then it is a decrypted format and we need to encrypt it
   629  			if err != nil {
   630  				return cryptFs.EncryptFileName(remote)
   631  			}
   632  			// else it's an encrypted format and we can use it as it is
   633  		}
   634  	}
   635  	return remote
   636  }
   637  
   638  func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
   639  	out = make(rc.Params)
   640  	remoteInt, ok := in["remote"]
   641  	if !ok {
   642  		return out, fmt.Errorf("remote is needed")
   643  	}
   644  	remote := remoteInt.(string)
   645  	withData := false
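	// note: the mere presence of the withData key enables chunk deletion;
	// its value is not parsed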
   646  	_, ok = in["withData"]
   647  	if ok {
   648  		withData = true
   649  	}
   650  
   651  	remote = f.unwrapRemote(remote)
   652  	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
   653  		return out, fmt.Errorf("%s doesn't exist in cache", remote)
   654  	}
   655  
   656  	co := NewObject(f, remote)
   657  	err = f.cache.GetObject(co)
   658  	if err != nil { // it could be a dir
   659  		cd := NewDirectory(f, remote)
   660  		err := f.cache.ExpireDir(cd)
   661  		if err != nil {
   662  			return out, fmt.Errorf("error expiring directory: %w", err)
   663  		}
   664  		// notify vfs too
   665  		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
   666  		out["status"] = "ok"
   667  		out["message"] = fmt.Sprintf("cached directory cleared: %v", remote)
   668  		return out, nil
   669  	}
   670  	// expire the entry
   671  	err = f.cache.ExpireObject(co, withData)
   672  	if err != nil {
   673  		return out, fmt.Errorf("error expiring file: %w", err)
   674  	}
   675  	// notify vfs too
   676  	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
   677  
   678  	out["status"] = "ok"
   679  	out["message"] = fmt.Sprintf("cached file cleared: %v", remote)
   680  	return out, nil
   681  }
   682  
   683  func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
   684  	type chunkRange struct {
   685  		start, end int64
   686  	}
   687  	parseChunks := func(ranges string) (crs []chunkRange, err error) {
   688  		for _, part := range strings.Split(ranges, ",") {
   689  			var start, end int64 = 0, math.MaxInt64
   690  			switch ints := strings.Split(part, ":"); len(ints) {
   691  			case 1:
   692  				start, err = strconv.ParseInt(ints[0], 10, 64)
   693  				if err != nil {
   694  					return nil, fmt.Errorf("invalid range: %q", part)
   695  				}
   696  				end = start + 1
   697  			case 2:
   698  				if ints[0] != "" {
   699  					start, err = strconv.ParseInt(ints[0], 10, 64)
   700  					if err != nil {
   701  						return nil, fmt.Errorf("invalid range: %q", part)
   702  					}
   703  				}
   704  				if ints[1] != "" {
   705  					end, err = strconv.ParseInt(ints[1], 10, 64)
   706  					if err != nil {
   707  						return nil, fmt.Errorf("invalid range: %q", part)
   708  					}
   709  				}
   710  			default:
   711  				return nil, fmt.Errorf("invalid range: %q", part)
   712  			}
   713  			crs = append(crs, chunkRange{start: start, end: end})
   714  		}
   715  		return
   716  	}
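	// e.g. parseChunks(":5,-5:") -> [{0, 5}, {-5, math.MaxInt64}]; negative
	// and open-ended bounds are resolved against the real chunk count in
	// walkChunkRange below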
   717  	walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
   718  		if size <= 0 {
   719  			return
   720  		}
   721  		chunks := (size-1)/f.ChunkSize() + 1
   722  
   723  		start, end := cr.start, cr.end
   724  		if start < 0 {
   725  			start += chunks
   726  		}
   727  		if end <= 0 {
   728  			end += chunks
   729  		}
   730  		if end <= start {
   731  			return
   732  		}
   733  		switch {
   734  		case start < 0:
   735  			start = 0
   736  		case start >= chunks:
   737  			return
   738  		}
   739  		switch {
   740  		case end <= start:
   741  			end = start + 1
   742  		case end >= chunks:
   743  			end = chunks
   744  		}
   745  		for i := start; i < end; i++ {
   746  			cb(i)
   747  		}
   748  	}
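	// e.g. a 100 MiB file with 5 MiB chunks has 20 chunks, so the range
	// {-5, math.MaxInt64} ("-5:") resolves to chunks 15..19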
   749  	walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
   750  		for _, cr := range crs {
   751  			walkChunkRange(cr, size, cb)
   752  		}
   753  	}
   754  
   755  	v, ok := in["chunks"]
   756  	if !ok {
   757  		return nil, errors.New("missing chunks parameter")
   758  	}
   759  	s, ok := v.(string)
   760  	if !ok {
   761  		return nil, errors.New("invalid chunks parameter")
   762  	}
   763  	delete(in, "chunks")
   764  	crs, err := parseChunks(s)
   765  	if err != nil {
   766  		return nil, fmt.Errorf("invalid chunks parameter: %w", err)
   767  	}
   768  	var files [][2]string
   769  	for k, v := range in {
   770  		if !strings.HasPrefix(k, "file") {
   771  			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
   772  		}
   773  		switch v := v.(type) {
   774  		case string:
   775  			files = append(files, [2]string{v, f.unwrapRemote(v)})
   776  		default:
   777  			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
   778  		}
   779  	}
   780  	type fileStatus struct {
   781  		Error         string
   782  		FetchedChunks int
   783  	}
   784  	fetchedChunks := make(map[string]fileStatus, len(files))
   785  	for _, pair := range files {
   786  		file, remote := pair[0], pair[1]
   787  		var status fileStatus
   788  		o, err := f.NewObject(ctx, remote)
   789  		if err != nil {
   790  			fetchedChunks[file] = fileStatus{Error: err.Error()}
   791  			continue
   792  		}
   793  		co := o.(*Object)
   794  		err = co.refreshFromSource(ctx, true)
   795  		if err != nil {
   796  			fetchedChunks[file] = fileStatus{Error: err.Error()}
   797  			continue
   798  		}
   799  		handle := NewObjectHandle(ctx, co, f)
   800  		handle.UseMemory = false
   801  		handle.scaleWorkers(1)
   802  		walkChunkRanges(crs, co.Size(), func(chunk int64) {
   803  			_, err := handle.getChunk(chunk * f.ChunkSize())
   804  			if err != nil {
   805  				if status.Error == "" {
   806  					status.Error = err.Error()
   807  				}
   808  			} else {
   809  				status.FetchedChunks++
   810  			}
   811  		})
   812  		fetchedChunks[file] = status
   813  	}
   814  
   815  	return rc.Params{"status": fetchedChunks}, nil
   816  }
   817  
// receiveChangeNotify is a wrapper for notifications sent from the wrapped FS about changed files
   819  func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
   820  	if crypt, yes := f.isWrappedByCrypt(); yes {
   821  		decryptedPath, err := crypt.DecryptFileName(forgetPath)
   822  		if err == nil {
   823  			fs.Infof(decryptedPath, "received cache expiry notification")
   824  		} else {
   825  			fs.Infof(forgetPath, "received cache expiry notification")
   826  		}
   827  	} else {
   828  		fs.Infof(forgetPath, "received cache expiry notification")
   829  	}
   830  	// notify upstreams too (vfs)
   831  	f.notifyChangeUpstream(forgetPath, entryType)
   832  
   833  	var cd *Directory
   834  	if entryType == fs.EntryObject {
   835  		co := NewObject(f, forgetPath)
   836  		err := f.cache.GetObject(co)
   837  		if err != nil {
   838  			fs.Debugf(f, "got change notification for non cached entry %v", co)
   839  		}
   840  		err = f.cache.ExpireObject(co, true)
   841  		if err != nil {
   842  			fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err)
   843  		}
   844  		cd = NewDirectory(f, cleanPath(path.Dir(co.Remote())))
   845  	} else {
   846  		cd = NewDirectory(f, forgetPath)
   847  	}
   848  	// we expire the dir
   849  	err := f.cache.ExpireDir(cd)
   850  	if err != nil {
   851  		fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err)
   852  	} else {
   853  		fs.Debugf(forgetPath, "notify: expired '%v'", cd)
   854  	}
   855  
   856  	f.notifiedMu.Lock()
   857  	defer f.notifiedMu.Unlock()
   858  	f.notifiedRemotes[forgetPath] = true
   859  	f.notifiedRemotes[cd.Remote()] = true
   860  }
   861  
// notifyChangeUpstreamIfNeeded notifies upstream only when the wrapped remote
// doesn't notify on changes itself or when a temp fs is used
   864  func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
   865  	if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
   866  		f.notifyChangeUpstream(remote, entryType)
   867  	}
   868  }
   869  
   870  // notifyChangeUpstream will loop through all the upstreams and notify
   871  // of the provided remote (should be only a dir)
   872  func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
   873  	f.parentsForgetMu.Lock()
   874  	defer f.parentsForgetMu.Unlock()
   875  	if len(f.parentsForgetFn) > 0 {
   876  		for _, fn := range f.parentsForgetFn {
   877  			fn(remote, entryType)
   878  		}
   879  	}
   880  }
   881  
   882  // ChangeNotify can subscribe multiple callers
   883  // this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (e.g. VFS) to clear out whenever something changes
   885  func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
   886  	f.parentsForgetMu.Lock()
   887  	defer f.parentsForgetMu.Unlock()
   888  	fs.Debugf(f, "subscribing to ChangeNotify")
   889  	f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
   890  	go func() {
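		// drain poll interval updates: polling is driven by the wrapped fs
		// through the channel set up in NewFs, not per subscriber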
   891  		for range pollInterval {
   892  		}
   893  	}()
   894  }
   895  
   896  // Name of the remote (as passed into NewFs)
   897  func (f *Fs) Name() string {
   898  	return f.name
   899  }
   900  
   901  // Root of the remote (as passed into NewFs)
   902  func (f *Fs) Root() string {
   903  	return f.root
   904  }
   905  
   906  // Features returns the optional features of this Fs
   907  func (f *Fs) Features() *fs.Features {
   908  	return f.features
   909  }
   910  
   911  // String returns a description of the FS
   912  func (f *Fs) String() string {
   913  	return fmt.Sprintf("Cache remote %s:%s", f.name, f.root)
   914  }
   915  
   916  // ChunkSize returns the configured chunk size
   917  func (f *Fs) ChunkSize() int64 {
   918  	return int64(f.opt.ChunkSize)
   919  }
   920  
   921  // InfoAge returns the configured file age
   922  func (f *Fs) InfoAge() time.Duration {
   923  	return time.Duration(f.opt.InfoAge)
   924  }
   925  
   926  // TempUploadWaitTime returns the configured temp file upload wait time
   927  func (f *Fs) TempUploadWaitTime() time.Duration {
   928  	return time.Duration(f.opt.TempWaitTime)
   929  }
   930  
   931  // NewObject finds the Object at remote.
   932  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   933  	var err error
   934  
   935  	fs.Debugf(f, "new object '%s'", remote)
   936  	co := NewObject(f, remote)
   937  	// search for entry in cache and validate it
   938  	err = f.cache.GetObject(co)
   939  	if err != nil {
   940  		fs.Debugf(remote, "find: error: %v", err)
   941  	} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
   942  		fs.Debugf(co, "find: cold object: %+v", co)
   943  	} else {
   944  		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
   945  		return co, nil
   946  	}
   947  
   948  	// search for entry in source or temp fs
   949  	var obj fs.Object
   950  	if f.opt.TempWritePath != "" {
   951  		obj, err = f.tempFs.NewObject(ctx, remote)
   952  		// not found in temp fs
   953  		if err != nil {
   954  			fs.Debugf(remote, "find: not found in local cache fs")
   955  			obj, err = f.Fs.NewObject(ctx, remote)
   956  		} else {
   957  			fs.Debugf(obj, "find: found in local cache fs")
   958  		}
   959  	} else {
   960  		obj, err = f.Fs.NewObject(ctx, remote)
   961  	}
   962  
   963  	// not found in either fs
   964  	if err != nil {
   965  		fs.Debugf(obj, "find failed: not found in either local or remote fs")
   966  		return nil, err
   967  	}
   968  
   969  	// cache the new entry
   970  	co = ObjectFromOriginal(ctx, f, obj).persist()
   971  	fs.Debugf(co, "find: cached object")
   972  	return co, nil
   973  }
   974  
   975  // List the objects and directories in dir into entries
   976  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   977  	fs.Debugf(f, "list '%s'", dir)
   978  	cd := ShallowDirectory(f, dir)
   979  
   980  	// search for cached dir entries and validate them
   981  	entries, err = f.cache.GetDirEntries(cd)
   982  	if err != nil {
   983  		fs.Debugf(dir, "list: error: %v", err)
   984  	} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
   985  		fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
   986  	} else if len(entries) == 0 {
   987  		// TODO: read empty dirs from source?
   988  		fs.Debugf(dir, "list: empty listing")
   989  	} else {
   990  		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
   991  		fs.Debugf(dir, "list: cached entries: %v", entries)
   992  		return entries, nil
   993  	}
   994  
   995  	// we first search any temporary files stored locally
   996  	var cachedEntries fs.DirEntries
   997  	if f.opt.TempWritePath != "" {
   998  		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
   999  		if err != nil {
  1000  			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
  1001  		} else {
  1002  			fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries))
  1003  			fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
  1004  
  1005  			for _, queuedRemote := range queuedEntries {
  1006  				queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
  1007  				if err != nil {
  1008  					fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
  1009  					continue
  1010  				}
  1011  				co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
  1012  				fs.Debugf(co, "list: cached temp object")
  1013  				cachedEntries = append(cachedEntries, co)
  1014  			}
  1015  		}
  1016  	}
  1017  
  1018  	// search from the source
  1019  	sourceEntries, err := f.Fs.List(ctx, dir)
  1020  	if err != nil {
  1021  		return nil, err
  1022  	}
  1023  	fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
  1024  	fs.Debugf(dir, "list: source entries: %v", sourceEntries)
  1025  
  1026  	sort.Sort(sourceEntries)
  1027  	for _, entry := range entries {
  1028  		entryRemote := entry.Remote()
  1029  		i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
  1030  		if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
  1031  			continue
  1032  		}
  1033  		fp := path.Join(f.Root(), entryRemote)
  1034  		switch entry.(type) {
  1035  		case fs.Object:
  1036  			_ = f.cache.RemoveObject(fp)
  1037  		case fs.Directory:
  1038  			_ = f.cache.RemoveDir(fp)
  1039  		}
  1040  		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
  1041  	}
  1042  	entries = nil //nolint:ineffassign
  1043  
  1044  	// and then iterate over the ones from source (temp Objects will override source ones)
  1045  	var batchDirectories []*Directory
  1046  	sort.Sort(cachedEntries)
  1047  	tmpCnt := len(cachedEntries)
  1048  	for _, entry := range sourceEntries {
  1049  		switch o := entry.(type) {
  1050  		case fs.Object:
  1051  			// skip over temporary objects (might be uploading)
  1052  			oRemote := o.Remote()
  1053  			i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
  1054  			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
  1055  				continue
  1056  			}
  1057  			co := ObjectFromOriginal(ctx, f, o).persist()
  1058  			cachedEntries = append(cachedEntries, co)
  1059  			fs.Debugf(dir, "list: cached object: %v", co)
  1060  		case fs.Directory:
  1061  			cdd := DirectoryFromOriginal(ctx, f, o)
  1062  			// check if the dir isn't expired and add it in cache if it isn't
  1063  			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
  1064  				batchDirectories = append(batchDirectories, cdd)
  1065  			}
  1066  			cachedEntries = append(cachedEntries, cdd)
  1067  		default:
  1068  			fs.Debugf(entry, "list: Unknown object type %T", entry)
  1069  		}
  1070  	}
  1071  	err = f.cache.AddBatchDir(batchDirectories)
  1072  	if err != nil {
  1073  		fs.Errorf(dir, "list: error caching directories from listing %v", dir)
  1074  	} else {
  1075  		fs.Debugf(dir, "list: cached directories: %v", len(batchDirectories))
  1076  	}
  1077  
  1078  	// cache dir meta
  1079  	t := time.Now()
  1080  	cd.CacheTs = &t
  1081  	err = f.cache.AddDir(cd)
  1082  	if err != nil {
  1083  		fs.Errorf(cd, "list: save error: '%v'", err)
  1084  	} else {
  1085  		fs.Debugf(dir, "list: cached dir: '%v', cache ts: %v", cd.abs(), cd.CacheTs)
  1086  	}
  1087  
  1088  	return cachedEntries, nil
  1089  }
  1090  
  1091  func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
  1092  	entries, err := f.List(ctx, dir)
  1093  	if err != nil {
  1094  		return err
  1095  	}
  1096  
  1097  	for i := 0; i < len(entries); i++ {
  1098  		innerDir, ok := entries[i].(fs.Directory)
  1099  		if ok {
  1100  			err := f.recurse(ctx, innerDir.Remote(), list)
  1101  			if err != nil {
  1102  				return err
  1103  			}
  1104  		}
  1105  
  1106  		err := list.Add(entries[i])
  1107  		if err != nil {
  1108  			return err
  1109  		}
  1110  	}
  1111  
  1112  	return nil
  1113  }
  1114  
  1115  // ListR lists the objects and directories of the Fs starting
  1116  // from dir recursively into out.
  1117  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  1118  	fs.Debugf(f, "list recursively from '%s'", dir)
  1119  
  1120  	// we check if the source FS supports ListR
  1121  	// if it does, we'll use that to get all the entries, cache them and return
  1122  	do := f.Fs.Features().ListR
  1123  	if do != nil {
  1124  		return do(ctx, dir, func(entries fs.DirEntries) error {
  1125  			// we got called back with a set of entries so let's cache them and call the original callback
  1126  			for _, entry := range entries {
  1127  				switch o := entry.(type) {
  1128  				case fs.Object:
  1129  					_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
  1130  				case fs.Directory:
  1131  					_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
  1132  				default:
  1133  					return fmt.Errorf("unknown object type %T", entry)
  1134  				}
  1135  			}
  1136  
  1137  			// call the original callback
  1138  			return callback(entries)
  1139  		})
  1140  	}
  1141  
  1142  	// if we're here, we're gonna do a standard recursive traversal and cache everything
  1143  	list := walk.NewListRHelper(callback)
  1144  	err = f.recurse(ctx, dir, list)
  1145  	if err != nil {
  1146  		return err
  1147  	}
  1148  
  1149  	return list.Flush()
  1150  }
  1151  
  1152  // Mkdir makes the directory (container, bucket)
  1153  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1154  	fs.Debugf(f, "mkdir '%s'", dir)
  1155  	err := f.Fs.Mkdir(ctx, dir)
  1156  	if err != nil {
  1157  		return err
  1158  	}
  1159  	fs.Debugf(dir, "mkdir: created dir in source fs")
  1160  
  1161  	cd := NewDirectory(f, cleanPath(dir))
  1162  	err = f.cache.AddDir(cd)
  1163  	if err != nil {
  1164  		fs.Errorf(dir, "mkdir: add error: %v", err)
  1165  	} else {
  1166  		fs.Debugf(cd, "mkdir: added to cache")
  1167  	}
  1168  	// expire parent of new dir
  1169  	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
  1170  	err = f.cache.ExpireDir(parentCd)
  1171  	if err != nil {
  1172  		fs.Errorf(parentCd, "mkdir: cache expire error: %v", err)
  1173  	} else {
  1174  		fs.Infof(parentCd, "mkdir: cache expired")
  1175  	}
  1176  	// advertise to ChangeNotify if wrapped doesn't do that
  1177  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1178  
  1179  	return nil
  1180  }
  1181  
  1182  // Rmdir removes the directory (container, bucket) if empty
  1183  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1184  	fs.Debugf(f, "rmdir '%s'", dir)
  1185  
  1186  	if f.opt.TempWritePath != "" {
  1187  		// pause background uploads
  1188  		f.backgroundRunner.pause()
  1189  		defer f.backgroundRunner.play()
  1190  
		// we check if the dir exists on the remote and remove it there too if it does
  1192  		// otherwise, we skip this step
  1193  		_, err := f.UnWrap().List(ctx, dir)
  1194  		if err == nil {
  1195  			err := f.Fs.Rmdir(ctx, dir)
  1196  			if err != nil {
  1197  				return err
  1198  			}
  1199  			fs.Debugf(dir, "rmdir: removed dir in source fs")
  1200  		}
  1201  
  1202  		var queuedEntries []*Object
  1203  		err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
  1204  			for _, o := range entries {
  1205  				if oo, ok := o.(fs.Object); ok {
  1206  					co := ObjectFromOriginal(ctx, f, oo)
  1207  					queuedEntries = append(queuedEntries, co)
  1208  				}
  1209  			}
  1210  			return nil
  1211  		})
  1212  		if err != nil {
  1213  			fs.Errorf(dir, "rmdir: error getting pending uploads: %v", err)
  1214  		} else {
  1215  			fs.Debugf(dir, "rmdir: read %v from temp fs", len(queuedEntries))
  1216  			fs.Debugf(dir, "rmdir: temp fs entries: %v", queuedEntries)
  1217  			if len(queuedEntries) > 0 {
  1218  				fs.Errorf(dir, "rmdir: temporary dir not empty: %v", queuedEntries)
  1219  				return fs.ErrorDirectoryNotEmpty
  1220  			}
  1221  		}
  1222  	} else {
  1223  		err := f.Fs.Rmdir(ctx, dir)
  1224  		if err != nil {
  1225  			return err
  1226  		}
  1227  		fs.Debugf(dir, "rmdir: removed dir in source fs")
  1228  	}
  1229  
  1230  	// remove dir data
  1231  	d := NewDirectory(f, dir)
  1232  	err := f.cache.RemoveDir(d.abs())
  1233  	if err != nil {
  1234  		fs.Errorf(dir, "rmdir: remove error: %v", err)
  1235  	} else {
  1236  		fs.Debugf(d, "rmdir: removed from cache")
  1237  	}
  1238  	// expire parent
  1239  	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
  1240  	err = f.cache.ExpireDir(parentCd)
  1241  	if err != nil {
  1242  		fs.Errorf(dir, "rmdir: cache expire error: %v", err)
  1243  	} else {
  1244  		fs.Infof(parentCd, "rmdir: cache expired")
  1245  	}
  1246  	// advertise to ChangeNotify if wrapped doesn't do that
  1247  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1248  
  1249  	return nil
  1250  }
  1251  
  1252  // DirMove moves src, srcRemote to this remote at dstRemote
  1253  // using server-side move operations.
  1254  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1255  	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
  1256  
  1257  	do := f.Fs.Features().DirMove
  1258  	if do == nil {
  1259  		return fs.ErrorCantDirMove
  1260  	}
  1261  	srcFs, ok := src.(*Fs)
  1262  	if !ok {
  1263  		fs.Errorf(srcFs, "can't move directory - not same remote type")
  1264  		return fs.ErrorCantDirMove
  1265  	}
  1266  	if srcFs.Fs.Name() != f.Fs.Name() {
  1267  		fs.Errorf(srcFs, "can't move directory - not wrapping same remotes")
  1268  		return fs.ErrorCantDirMove
  1269  	}
  1270  
  1271  	if f.opt.TempWritePath != "" {
  1272  		// pause background uploads
  1273  		f.backgroundRunner.pause()
  1274  		defer f.backgroundRunner.play()
  1275  
  1276  		_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
  1277  		_, errInTemp := f.tempFs.List(ctx, srcRemote)
  1278  		// not found in either fs
  1279  		if errInWrap != nil && errInTemp != nil {
  1280  			return fs.ErrorDirNotFound
  1281  		}
  1282  
  1283  		// we check if the source exists on the remote and make the same move on it too if it does
  1284  		// otherwise, we skip this step
  1285  		if errInWrap == nil {
  1286  			err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
  1287  			if err != nil {
  1288  				return err
  1289  			}
  1290  			fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
  1291  		}
  1292  		// we need to check if the directory exists in the temp fs
  1293  		// and skip the move if it doesn't
  1294  		if errInTemp != nil {
  1295  			goto cleanup
  1296  		}
  1297  
  1298  		var queuedEntries []*Object
  1299  		err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
  1300  			for _, o := range entries {
  1301  				if oo, ok := o.(fs.Object); ok {
  1302  					co := ObjectFromOriginal(ctx, f, oo)
  1303  					queuedEntries = append(queuedEntries, co)
  1304  					if co.tempFileStartedUpload() {
						fs.Errorf(co, "can't move - upload has already started and needs to finish first")
  1306  						return fs.ErrorCantDirMove
  1307  					}
  1308  				}
  1309  			}
  1310  			return nil
  1311  		})
  1312  		if err != nil {
  1313  			return err
  1314  		}
  1315  		fs.Debugf(srcRemote, "dirmove: read %v from temp fs", len(queuedEntries))
  1316  		fs.Debugf(srcRemote, "dirmove: temp fs entries: %v", queuedEntries)
  1317  
  1318  		do := f.tempFs.Features().DirMove
  1319  		if do == nil {
  1320  			fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
  1321  			return fs.ErrorCantDirMove
  1322  		}
  1323  		err = do(ctx, f.tempFs, srcRemote, dstRemote)
  1324  		if err != nil {
  1325  			return err
  1326  		}
  1327  		err = f.cache.ReconcileTempUploads(ctx, f)
  1328  		if err != nil {
  1329  			return err
  1330  		}
  1331  	} else {
  1332  		err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
  1333  		if err != nil {
  1334  			return err
  1335  		}
  1336  		fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
  1337  	}
  1338  cleanup:
  1339  
  1340  	// delete src dir from cache along with all chunks
  1341  	srcDir := NewDirectory(srcFs, srcRemote)
  1342  	err := f.cache.RemoveDir(srcDir.abs())
  1343  	if err != nil {
  1344  		fs.Errorf(srcDir, "dirmove: remove error: %v", err)
  1345  	} else {
  1346  		fs.Debugf(srcDir, "dirmove: removed cached dir")
  1347  	}
  1348  	// expire src parent
  1349  	srcParent := NewDirectory(f, cleanPath(path.Dir(srcRemote)))
  1350  	err = f.cache.ExpireDir(srcParent)
  1351  	if err != nil {
  1352  		fs.Errorf(srcParent, "dirmove: cache expire error: %v", err)
  1353  	} else {
  1354  		fs.Debugf(srcParent, "dirmove: cache expired")
  1355  	}
  1356  	// advertise to ChangeNotify if wrapped doesn't do that
  1357  	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)
  1358  
  1359  	// expire parent dir at the destination path
  1360  	dstParent := NewDirectory(f, cleanPath(path.Dir(dstRemote)))
  1361  	err = f.cache.ExpireDir(dstParent)
  1362  	if err != nil {
  1363  		fs.Errorf(dstParent, "dirmove: cache expire error: %v", err)
  1364  	} else {
  1365  		fs.Debugf(dstParent, "dirmove: cache expired")
  1366  	}
  1367  	// advertise to ChangeNotify if wrapped doesn't do that
  1368  	f.notifyChangeUpstreamIfNeeded(dstParent.Remote(), fs.EntryDirectory)
  1369  	// TODO: precache dst dir and save the chunks
  1370  
  1371  	return nil
  1372  }
  1373  
  1374  // cacheReader will split the stream of a reader to be cached at the same time it is read by the original source
  1375  func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn io.Reader)) {
  1376  	// create the pipe and tee reader
  1377  	pr, pw := io.Pipe()
  1378  	tr := io.TeeReader(u, pw)
  1379  
  1380  	// create channel to synchronize
  1381  	done := make(chan bool)
  1382  	defer close(done)
  1383  
  1384  	go func() {
  1385  		// notify the cache reader that we're complete after the source FS finishes
  1386  		defer func() {
  1387  			_ = pw.Close()
  1388  		}()
  1389  		// process original reading
  1390  		originalRead(tr)
  1391  		// signal complete
  1392  		done <- true
  1393  	}()
  1394  
  1395  	go func() {
  1396  		var offset int64
  1397  		for {
  1398  			chunk := make([]byte, f.opt.ChunkSize)
  1399  			readSize, err := io.ReadFull(pr, chunk)
  1400  			// we ignore 3 failures which are ok:
  1401  			// 1. EOF - original reading finished and we got a full buffer too
  1402  			// 2. ErrUnexpectedEOF - original reading finished and partial buffer
  1403  			// 3. ErrClosedPipe - source remote reader was closed (usually means it reached the end) and we need to stop too
  1404  			// if we have a different error: we're going to error out the original reading too and stop this
  1405  			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && err != io.ErrClosedPipe {
  1406  				fs.Errorf(src, "error saving new data in cache. offset: %v, err: %v", offset, err)
  1407  				_ = pr.CloseWithError(err)
  1408  				break
  1409  			}
  1410  			// if we have some bytes we cache them
  1411  			if readSize > 0 {
  1412  				chunk = chunk[:readSize]
  1413  				err2 := f.cache.AddChunk(cleanPath(path.Join(f.root, src.Remote())), chunk, offset)
  1414  				if err2 != nil {
  1415  					fs.Errorf(src, "error saving new data in cache '%v'", err2)
  1416  					_ = pr.CloseWithError(err2)
  1417  					break
  1418  				}
  1419  				offset += int64(readSize)
  1420  			}
  1421  			// stuff should be closed but let's be sure
  1422  			if err == io.EOF || err == io.ErrUnexpectedEOF || err == io.ErrClosedPipe {
  1423  				_ = pr.Close()
  1424  				break
  1425  			}
  1426  		}
  1427  
  1428  		// signal complete
  1429  		done <- true
  1430  	}()
  1431  
  1432  	// wait until both are done
  1433  	for c := 0; c < 2; c++ {
  1434  		<-done
  1435  	}
  1436  }
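
// put (below) feeds cacheReader when cache-writes is enabled, roughly:
//
//	f.cacheReader(in, src, func(inn io.Reader) {
//		obj, err = put(ctx, inn, src, options...)
//	})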
  1437  
  1438  type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
  1439  
  1440  // put in to the remote path
  1441  func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
  1442  	var err error
  1443  	var obj fs.Object
  1444  
  1445  	// queue for upload and store in temp fs if configured
  1446  	if f.opt.TempWritePath != "" {
  1447  		// we need to clear the caches before a put through temp fs
  1448  		parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1449  		_ = f.cache.ExpireDir(parentCd)
  1450  		f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1451  
  1452  		obj, err = f.tempFs.Put(ctx, in, src, options...)
  1453  		if err != nil {
  1454  			fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
  1455  			return nil, err
  1456  		}
  1457  		fs.Infof(obj, "put: uploaded in temp fs")
  1458  		err = f.cache.addPendingUpload(path.Join(f.Root(), src.Remote()), false)
  1459  		if err != nil {
  1460  			fs.Errorf(obj, "put: failed to queue for upload: %v", err)
  1461  			return nil, err
  1462  		}
  1463  		fs.Infof(obj, "put: queued for upload")
  1464  		// if cache writes are enabled, write it through the cache first
  1465  	} else if f.opt.StoreWrites {
  1466  		f.cacheReader(in, src, func(inn io.Reader) {
  1467  			obj, err = put(ctx, inn, src, options...)
  1468  		})
  1469  		if err == nil {
  1470  			fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
  1471  		}
  1472  		// last option: save it directly in the remote fs
  1473  	} else {
  1474  		obj, err = put(ctx, in, src, options...)
  1475  		if err == nil {
  1476  			fs.Debugf(obj, "put: uploaded to remote fs")
  1477  		}
  1478  	}
  1479  	// validate and stop if errors are found
  1480  	if err != nil {
  1481  		fs.Errorf(src, "put: error uploading: %v", err)
  1482  		return nil, err
  1483  	}
  1484  
  1485  	// cache the new file
  1486  	cachedObj := ObjectFromOriginal(ctx, f, obj)
  1487  
  1488  	// delete cached chunks and info so they are replaced with the new ones
  1489  	_ = f.cache.RemoveObject(cachedObj.abs())
  1490  
  1491  	cachedObj.persist()
  1492  	fs.Debugf(cachedObj, "put: added to cache")
  1493  
  1494  	// expire parent
  1495  	parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1496  	err = f.cache.ExpireDir(parentCd)
  1497  	if err != nil {
  1498  		fs.Errorf(cachedObj, "put: cache expire error: %v", err)
  1499  	} else {
  1500  		fs.Infof(parentCd, "put: cache expired")
  1501  	}
  1502  	// advertise to ChangeNotify
  1503  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1504  
  1505  	return cachedObj, nil
  1506  }
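
// The three write paths above map onto the backend's configuration - a hedged
// sketch using the documented cache flags (the remote name "mycache" is made up):
//
//	rclone copy /src mycache: --cache-tmp-upload-path=/tmp/rclone-upload  # temp fs + queued background upload
//	rclone copy /src mycache: --cache-writes                              # upload and cache the chunks in one pass
//	rclone copy /src mycache:                                             # default: upload directly, cache metadata only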
  1507  
  1508  // Put uploads in to the remote path with the given modTime and size
  1509  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1510  	fs.Debugf(f, "put data at '%s'", src.Remote())
  1511  	return f.put(ctx, in, src, options, f.Fs.Put)
  1512  }
  1513  
  1514  // PutUnchecked uploads the object
  1515  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1516  	do := f.Fs.Features().PutUnchecked
  1517  	if do == nil {
  1518  		return nil, errors.New("can't PutUnchecked")
  1519  	}
  1520  	fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
  1521  	return f.put(ctx, in, src, options, do)
  1522  }
  1523  
  1524  // PutStream uploads the object
  1525  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1526  	do := f.Fs.Features().PutStream
  1527  	if do == nil {
  1528  		return nil, errors.New("can't PutStream")
  1529  	}
  1530  	fs.Debugf(f, "put data streaming in '%s'", src.Remote())
  1531  	return f.put(ctx, in, src, options, do)
  1532  }
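
// PutUnchecked and PutStream above both follow rclone's optional-feature
// pattern: the wrapped remote advertises optional methods via fs.Features()
// and a nil entry means "not supported". The same guard, condensed:
//
//	if do := f.Fs.Features().PutStream; do != nil {
//		return f.put(ctx, in, src, options, do)
//	}
//	return nil, errors.New("can't PutStream")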
  1533  
  1534  // Copy src to this remote using server-side copy operations.
  1535  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1536  	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
  1537  
  1538  	do := f.Fs.Features().Copy
  1539  	if do == nil {
  1540  		fs.Errorf(src, "wrapped remote (%v) doesn't support Copy", f.Fs)
  1541  		return nil, fs.ErrorCantCopy
  1542  	}
  1543  	if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
  1544  		return nil, fs.ErrorCantCopy
  1545  	}
  1546  	// the source must be a cached object or we abort
  1547  	srcObj, ok := src.(*Object)
  1548  	if !ok {
  1549  		fs.Errorf(src, "can't copy - not same remote type")
  1550  		return nil, fs.ErrorCantCopy
  1551  	}
  1552  	// both the source cache fs and this cache fs need to wrap the same remote
  1553  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
  1554  		fs.Errorf(srcObj, "can't copy - not wrapping same remotes")
  1555  		return nil, fs.ErrorCantCopy
  1556  	}
  1557  	// refresh from source or abort
  1558  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1559  		fs.Errorf(f, "can't copy %v - %v", src, err)
  1560  		return nil, fs.ErrorCantCopy
  1561  	}
  1562  
  1563  	if srcObj.isTempFile() {
  1564  		// we check if the feature is still active
  1565  		if f.opt.TempWritePath == "" {
  1566  			fs.Errorf(srcObj, "can't copy - this is a locally cached file but the temp upload feature is turned off for this run")
  1567  			return nil, fs.ErrorCantCopy
  1568  		}
  1569  
  1570  		do = srcObj.ParentFs.Features().Copy
  1571  		if do == nil {
  1572  			fs.Errorf(src, "parent remote (%v) doesn't support Copy", srcObj.ParentFs)
  1573  			return nil, fs.ErrorCantCopy
  1574  		}
  1575  	}
  1576  
  1577  	obj, err := do(ctx, srcObj.Object, remote)
  1578  	if err != nil {
  1579  		fs.Errorf(srcObj, "error copying in cache: %v", err)
  1580  		return nil, err
  1581  	}
  1582  	fs.Debugf(obj, "copy: file copied")
  1583  
  1584  	// persist new
  1585  	co := ObjectFromOriginal(ctx, f, obj).persist()
  1586  	fs.Debugf(co, "copy: added to cache")
  1587  	// expire the destination path
  1588  	parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
  1589  	err = f.cache.ExpireDir(parentCd)
  1590  	if err != nil {
  1591  		fs.Errorf(parentCd, "copy: cache expire error: %v", err)
  1592  	} else {
  1593  		fs.Infof(parentCd, "copy: cache expired")
  1594  	}
  1595  	// advertise to ChangeNotify if wrapped doesn't do that
  1596  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1597  	// expire src parent
  1598  	srcParent := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1599  	err = f.cache.ExpireDir(srcParent)
  1600  	if err != nil {
  1601  		fs.Errorf(srcParent, "copy: cache expire error: %v", err)
  1602  	} else {
  1603  		fs.Infof(srcParent, "copy: cache expired")
  1604  	}
  1605  	// advertise to ChangeNotify if wrapped doesn't do that
  1606  	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)
  1607  
  1608  	return co, nil
  1609  }
  1610  
  1611  // Move src to this remote using server-side move operations.
  1612  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1613  	fs.Debugf(f, "moving obj '%s' -> '%s'", src, remote)
  1614  
  1615  	// if the wrapped fs doesn't support move, abort
  1616  	do := f.Fs.Features().Move
  1617  	if do == nil {
  1618  		fs.Errorf(src, "wrapped remote (%v) doesn't support Move", f.Fs)
  1619  		return nil, fs.ErrorCantMove
  1620  	}
  1621  	// the source must be a cached object or we abort
  1622  	srcObj, ok := src.(*Object)
  1623  	if !ok {
  1624  		fs.Errorf(src, "can't move - not same remote type")
  1625  		return nil, fs.ErrorCantMove
  1626  	}
  1627  	// both the source cache fs and this cache fs need to wrap the same remote
  1628  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
  1629  		fs.Errorf(srcObj, "can't move - not wrapping same remote types")
  1630  		return nil, fs.ErrorCantMove
  1631  	}
  1632  	// refresh from source or abort
  1633  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1634  		fs.Errorf(f, "can't move %v - %v", src, err)
  1635  		return nil, fs.ErrorCantMove
  1636  	}
  1637  
  1638  	// if this is a temp object then we perform the changes locally
  1639  	if srcObj.isTempFile() {
  1640  		// we check if the feature is still active
  1641  		if f.opt.TempWritePath == "" {
  1642  			fs.Errorf(srcObj, "can't move - this is a locally cached file but the temp upload feature is turned off for this run")
  1643  			return nil, fs.ErrorCantMove
  1644  		}
  1645  		// pause background uploads
  1646  		f.backgroundRunner.pause()
  1647  		defer f.backgroundRunner.play()
  1648  
  1649  		// started uploads can't be moved until they complete
  1650  		if srcObj.tempFileStartedUpload() {
  1651  			fs.Errorf(srcObj, "can't move - upload has already started and needs to finish first")
  1652  			return nil, fs.ErrorCantMove
  1653  		}
  1654  		do = f.tempFs.Features().Move
  1655  
  1656  		// we must also update the pending queue
  1657  		err := f.cache.updatePendingUpload(srcObj.abs(), func(item *tempUploadInfo) error {
  1658  			item.DestPath = path.Join(f.Root(), remote)
  1659  			item.AddedOn = time.Now()
  1660  			return nil
  1661  		})
  1662  		if err != nil {
  1663  			fs.Errorf(srcObj, "failed to rename queued file for upload: %v", err)
  1664  			return nil, fs.ErrorCantMove
  1665  		}
  1666  		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
  1667  	}
  1668  
  1669  	obj, err := do(ctx, srcObj.Object, remote)
  1670  	if err != nil {
  1671  		fs.Errorf(srcObj, "error moving: %v", err)
  1672  		return nil, err
  1673  	}
  1674  	fs.Debugf(obj, "move: file moved")
  1675  
  1676  	// remove old
  1677  	err = f.cache.RemoveObject(srcObj.abs())
  1678  	if err != nil {
  1679  		fs.Errorf(srcObj, "move: remove error: %v", err)
  1680  	} else {
  1681  		fs.Debugf(srcObj, "move: removed from cache")
  1682  	}
  1683  	// expire old parent
  1684  	parentCd := NewDirectory(f, cleanPath(path.Dir(srcObj.Remote())))
  1685  	err = f.cache.ExpireDir(parentCd)
  1686  	if err != nil {
  1687  		fs.Errorf(parentCd, "move: parent cache expire error: %v", err)
  1688  	} else {
  1689  		fs.Infof(parentCd, "move: cache expired")
  1690  	}
  1691  	// advertise to ChangeNotify if wrapped doesn't do that
  1692  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1693  	// persist new
  1694  	cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
  1695  	fs.Debugf(cachedObj, "move: added to cache")
  1696  	// expire new parent
  1697  	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1698  	err = f.cache.ExpireDir(parentCd)
  1699  	if err != nil {
  1700  		fs.Errorf(parentCd, "move: expire error: %v", err)
  1701  	} else {
  1702  		fs.Infof(parentCd, "move: cache expired")
  1703  	}
  1704  	// advertise to ChangeNotify if wrapped doesn't do that
  1705  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1706  
  1707  	return cachedObj, nil
  1708  }
  1709  
  1710  // Hashes returns the supported hash sets.
  1711  func (f *Fs) Hashes() hash.Set {
  1712  	return f.Fs.Hashes()
  1713  }
  1714  
  1715  // Purge all files in the directory
  1716  func (f *Fs) Purge(ctx context.Context, dir string) error {
  1717  	if dir == "" {
  1718  		// FIXME this isn't quite right as it should purge the dir prefix
  1719  		fs.Infof(f, "purging cache")
  1720  		f.cache.Purge()
  1721  	}
  1722  
  1723  	do := f.Fs.Features().Purge
  1724  	if do == nil {
  1725  		return fs.ErrorCantPurge
  1726  	}
  1727  
  1728  	return do(ctx, dir)
  1734  }
  1735  
  1736  // CleanUp the trash in the Fs
  1737  func (f *Fs) CleanUp(ctx context.Context) error {
  1738  	f.CleanUpCache(false)
  1739  
  1740  	do := f.Fs.Features().CleanUp
  1741  	if do == nil {
  1742  		return nil
  1743  	}
  1744  
  1745  	return do(ctx)
  1746  }
  1747  
  1748  // About gets quota information from the Fs
  1749  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1750  	do := f.Fs.Features().About
  1751  	if do == nil {
  1752  		return nil, errors.New("not supported by underlying remote")
  1753  	}
  1754  	return do(ctx)
  1755  }
  1756  
  1757  // Stats returns stats about the cache storage
  1758  func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
  1759  	return f.cache.Stats()
  1760  }
  1761  
  1762  // openRateLimited executes a closure after waiting on the rate limiter
  1763  func (f *Fs) openRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
  1764  	var err error
  1765  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
  1766  	defer cancel()
  1767  	start := time.Now()
  1768  
  1769  	if err = f.rateLimiter.Wait(ctx); err != nil {
  1770  		return nil, err
  1771  	}
  1772  
  1773  	elapsed := time.Since(start)
  1774  	if elapsed > time.Second*2 {
  1775  		fs.Debugf(f, "rate limited: %s", elapsed)
  1776  	}
  1777  	return fn()
  1778  }
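
// A minimal sketch of the golang.org/x/time/rate pattern used above (the
// limit shown is illustrative; the real limiter is built from the backend's
// rps option):
//
//	lim := rate.NewLimiter(rate.Limit(10), 1) // 10 events/second, burst of 1
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := lim.Wait(ctx); err != nil {
//		return nil, err // the deadline passed before a token became available
//	}
//	// proceed with the rate-limited open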
  1779  
  1780  // CleanUpCache will clean up only the expired cache data
  1781  func (f *Fs) CleanUpCache(ignoreLastTs bool) {
  1782  	f.cleanupMu.Lock()
  1783  	defer f.cleanupMu.Unlock()
  1784  
  1785  	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
  1786  		f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
  1787  		f.lastChunkCleanup = time.Now()
  1788  	}
  1789  }
  1790  
  1791  // StopBackgroundRunners will signal all the runners to stop their work.
  1792  // It can be triggered by a terminate signal or from tests between runs.
  1793  func (f *Fs) StopBackgroundRunners() {
  1794  	f.cleanupChan <- false
  1795  	if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
  1796  		f.backgroundRunner.close()
  1797  	}
  1798  	f.cache.Close()
  1799  	fs.Debugf(f, "Services stopped")
  1800  }
  1801  
  1802  // UnWrap returns the Fs that this Fs is wrapping
  1803  func (f *Fs) UnWrap() fs.Fs {
  1804  	return f.Fs
  1805  }
  1806  
  1807  // WrapFs returns the Fs that is wrapping this Fs
  1808  func (f *Fs) WrapFs() fs.Fs {
  1809  	return f.wrapper
  1810  }
  1811  
  1812  // SetWrapper sets the Fs that is wrapping this Fs
  1813  func (f *Fs) SetWrapper(wrapper fs.Fs) {
  1814  	f.wrapper = wrapper
  1815  }
  1816  
  1817  // isWrappedByCrypt checks if this is wrapped by a crypt remote
  1818  func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
  1819  	if f.wrapper == nil {
  1820  		return nil, false
  1821  	}
  1822  	c, ok := f.wrapper.(*crypt.Fs)
  1823  	return c, ok
  1824  }
  1825  
  1826  // cleanRootFromPath trims the root of the current fs from a path
  1827  func (f *Fs) cleanRootFromPath(p string) string {
  1828  	if f.Root() != "" {
  1829  		p = p[len(f.Root()):] // trim out root
  1830  		if len(p) > 0 {       // remove first separator
  1831  			p = p[1:]
  1832  		}
  1833  	}
  1834  
  1835  	return p
  1836  }
  1837  
  1838  func (f *Fs) isRootInPath(p string) bool {
  1839  	if f.Root() == "" {
  1840  		return true
  1841  	}
  1842  	return strings.HasPrefix(p, f.Root()+"/")
  1843  }
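
// Behaviour sketch for the two path helpers above, assuming f.Root() returns
// "media" (values worked out from the code, not from tests):
//
//	f.cleanRootFromPath("media/movies/a.mkv") // -> "movies/a.mkv"
//	f.isRootInPath("media/movies/a.mkv")      // -> true
//	f.isRootInPath("mediax/movies/a.mkv")     // -> false, the prefix must end at "/"
//	f.isRootInPath("media")                   // -> false, the bare root has no trailing "/"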
  1844  
  1845  // MergeDirs merges the contents of all the directories passed
  1846  // in into the first one and rmdirs the other directories.
  1847  func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
  1848  	do := f.Fs.Features().MergeDirs
  1849  	if do == nil {
  1850  		return errors.New("MergeDirs not supported")
  1851  	}
  1852  	for _, dir := range dirs {
  1853  		_ = f.cache.RemoveDir(dir.Remote())
  1854  	}
  1855  	return do(ctx, dirs)
  1856  }
  1857  
  1858  // DirCacheFlush flushes the dir cache
  1859  func (f *Fs) DirCacheFlush() {
  1860  	_ = f.cache.RemoveDir("")
  1861  }
  1862  
  1863  // GetBackgroundUploadChannel returns a channel that can be listened to for
  1864  // background upload state changes on this remote
  1865  func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
  1866  	if f.opt.TempWritePath != "" {
  1867  		return f.backgroundRunner.notifyCh
  1868  	}
  1869  	return nil
  1870  }
  1871  
  1872  func (f *Fs) isNotifiedRemote(remote string) bool {
  1873  	f.notifiedMu.Lock()
  1874  	defer f.notifiedMu.Unlock()
  1875  
  1876  	n, ok := f.notifiedRemotes[remote]
  1877  	if !ok || !n {
  1878  		return false
  1879  	}
  1880  
  1881  	delete(f.notifiedRemotes, remote)
  1882  	return n
  1883  }
  1884  
  1885  func cleanPath(p string) string {
  1886  	p = path.Clean(p)
  1887  	if p == "." || p == "/" {
  1888  		p = ""
  1889  	}
  1890  
  1891  	return p
  1892  }
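
// cleanPath normalization examples (derived from path.Clean semantics):
//
//	cleanPath("/")       // -> ""
//	cleanPath(".")       // -> ""
//	cleanPath("")        // -> "" (path.Clean("") yields ".", which is mapped to "")
//	cleanPath("a//b/./") // -> "a/b"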
  1893  
  1894  // UserInfo returns info about the connected user
  1895  func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
  1896  	do := f.Fs.Features().UserInfo
  1897  	if do == nil {
  1898  		return nil, fs.ErrorNotImplemented
  1899  	}
  1900  	return do(ctx)
  1901  }
  1902  
  1903  // Disconnect the current user
  1904  func (f *Fs) Disconnect(ctx context.Context) error {
  1905  	do := f.Fs.Features().Disconnect
  1906  	if do == nil {
  1907  		return fs.ErrorNotImplemented
  1908  	}
  1909  	return do(ctx)
  1910  }
  1911  
  1912  // Shutdown the backend, closing any background tasks and any
  1913  // cached connections.
  1914  func (f *Fs) Shutdown(ctx context.Context) error {
  1915  	do := f.Fs.Features().Shutdown
  1916  	if do == nil {
  1917  		return nil
  1918  	}
  1919  	return do(ctx)
  1920  }
  1921  
  1922  var commandHelp = []fs.CommandHelp{
  1923  	{
  1924  		Name:  "stats",
  1925  		Short: "Print stats on the cache backend in JSON format.",
  1926  	},
  1927  }
  1928  
  1929  // Command runs the named command on the backend
  1930  //
  1931  // The command to run is given by name;
  1932  // args may be used to read arguments from,
  1933  // and opt may be used to read optional arguments from.
  1934  //
  1935  // The result should be capable of being JSON encoded.
  1936  // If it is a string or a []string it will be shown to the user as-is,
  1937  // otherwise it will be JSON encoded and shown to the user like that.
  1938  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
  1939  	switch name {
  1940  	case "stats":
  1941  		return f.Stats()
  1942  	default:
  1943  		return nil, fs.ErrorCommandNotFound
  1944  	}
  1945  }
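
// Usage sketch from the command line, assuming a configured cache remote
// named "mycache" (the remote name is made up):
//
//	rclone backend stats mycache:
//
// This routes through Command("stats", ...) above and prints the result of
// f.Stats() as JSON.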
  1946  
  1947  // Check the interfaces are satisfied
  1948  var (
  1949  	_ fs.Fs             = (*Fs)(nil)
  1950  	_ fs.Purger         = (*Fs)(nil)
  1951  	_ fs.Copier         = (*Fs)(nil)
  1952  	_ fs.Mover          = (*Fs)(nil)
  1953  	_ fs.DirMover       = (*Fs)(nil)
  1954  	_ fs.PutUncheckeder = (*Fs)(nil)
  1955  	_ fs.PutStreamer    = (*Fs)(nil)
  1956  	_ fs.CleanUpper     = (*Fs)(nil)
  1957  	_ fs.UnWrapper      = (*Fs)(nil)
  1958  	_ fs.Wrapper        = (*Fs)(nil)
  1959  	_ fs.ListRer        = (*Fs)(nil)
  1960  	_ fs.ChangeNotifier = (*Fs)(nil)
  1961  	_ fs.Abouter        = (*Fs)(nil)
  1962  	_ fs.UserInfoer     = (*Fs)(nil)
  1963  	_ fs.Disconnecter   = (*Fs)(nil)
  1964  	_ fs.Commander      = (*Fs)(nil)
  1965  	_ fs.MergeDirser    = (*Fs)(nil)
  1966  	_ fs.Shutdowner     = (*Fs)(nil)
  1967  )
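
// The assertions above are the standard compile-time interface check idiom:
// assigning a typed nil to the blank identifier costs nothing at runtime but
// fails the build if *Fs stops satisfying an interface - for example,
// removing the Copy method would break `_ fs.Copier = (*Fs)(nil)` with a
// "missing method Copy" compile error.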