github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/cache/cache.go

     1  // +build !plan9
     2  
     3  package cache
     4  
     5  import (
     6  	"context"
     7  	"fmt"
     8  	"io"
     9  	"math"
    10  	"os"
    11  	"os/signal"
    12  	"path"
    13  	"path/filepath"
    14  	"sort"
    15  	"strconv"
    16  	"strings"
    17  	"sync"
    18  	"syscall"
    19  	"time"
    20  
    21  	"github.com/pkg/errors"
    22  	"github.com/rclone/rclone/backend/crypt"
    23  	"github.com/rclone/rclone/fs"
    24  	"github.com/rclone/rclone/fs/cache"
    25  	"github.com/rclone/rclone/fs/config"
    26  	"github.com/rclone/rclone/fs/config/configmap"
    27  	"github.com/rclone/rclone/fs/config/configstruct"
    28  	"github.com/rclone/rclone/fs/config/obscure"
    29  	"github.com/rclone/rclone/fs/fspath"
    30  	"github.com/rclone/rclone/fs/hash"
    31  	"github.com/rclone/rclone/fs/rc"
    32  	"github.com/rclone/rclone/fs/walk"
    33  	"github.com/rclone/rclone/lib/atexit"
    34  	"golang.org/x/time/rate"
    35  )
    36  
    37  const (
    38  	// DefCacheChunkSize is the default value for chunk size
    39  	DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
    40  	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
    41  	DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
    42  	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
    43  	DefCacheChunkCleanInterval = fs.Duration(time.Minute)
    44  	// DefCacheInfoAge is the default value for object info age
    45  	DefCacheInfoAge = fs.Duration(6 * time.Hour)
    46  	// DefCacheReadRetries is the default value for read retries
    47  	DefCacheReadRetries = 10
    48  	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
    49  	DefCacheTotalWorkers = 4
    50  	// DefCacheChunkNoMemory will enable or disable in-memory storage for chunks
    51  	DefCacheChunkNoMemory = false
    52  	// DefCacheRps limits the number of requests per second to the source FS
    53  	DefCacheRps = -1
    54  	// DefCacheWrites will cache file data on writes through the cache
    55  	DefCacheWrites = false
    56  	// DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded
    57  	DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
    58  	// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
    59  	DefCacheDbWaitTime = fs.Duration(1 * time.Second)
    60  )
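        // Rough sizing with the defaults above: a stream keeps at most
        // DefCacheTotalWorkers chunks in flight, so the transient in-memory
        // footprint is about 4 * 5 MB = 20 MB per stream (unless
        // "chunk_no_memory" is set). NewFs also rejects configurations where
        // chunk_total_size < chunk_size * workers.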
    61  
    62  // Register with Fs
    63  func init() {
    64  	fs.Register(&fs.RegInfo{
    65  		Name:        "cache",
    66  		Description: "Cache a remote",
    67  		NewFs:       NewFs,
    68  		CommandHelp: commandHelp,
    69  		Options: []fs.Option{{
    70  			Name:     "remote",
    71  			Help:     "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
    72  			Required: true,
    73  		}, {
    74  			Name: "plex_url",
    75  			Help: "The URL of the Plex server",
    76  		}, {
    77  			Name: "plex_username",
    78  			Help: "The username of the Plex user",
    79  		}, {
    80  			Name:       "plex_password",
    81  			Help:       "The password of the Plex user",
    82  			IsPassword: true,
    83  		}, {
    84  			Name:     "plex_token",
    85  			Help:     "The Plex token for authentication - auto set normally",
    86  			Hide:     fs.OptionHideBoth,
    87  			Advanced: true,
    88  		}, {
    89  			Name:     "plex_insecure",
    90  			Help:     "Skip all certificate verification when connecting to the Plex server",
    91  			Advanced: true,
    92  		}, {
    93  			Name: "chunk_size",
    94  			Help: `The size of a chunk (partial file data).
    95  
    96  Use lower numbers for slower connections. If the chunk size is
    97  changed, any downloaded chunks will be invalid and cache-chunk-path
    98  will need to be cleared or unexpected EOF errors will occur.`,
    99  			Default: DefCacheChunkSize,
   100  			Examples: []fs.OptionExample{{
   101  				Value: "1m",
   102  				Help:  "1 MB",
   103  			}, {
   104  				Value: "5M",
   105  				Help:  "5 MB",
   106  			}, {
   107  				Value: "10M",
   108  				Help:  "10 MB",
   109  			}},
   110  		}, {
   111  			Name: "info_age",
   112  			Help: `How long to cache file structure information (directory listings, file size, times, etc.).
   113  If all write operations are done through the cache then you can safely make
   114  this value very large as the cache store will also be updated in real time.`,
   115  			Default: DefCacheInfoAge,
   116  			Examples: []fs.OptionExample{{
   117  				Value: "1h",
   118  				Help:  "1 hour",
   119  			}, {
   120  				Value: "24h",
   121  				Help:  "24 hours",
   122  			}, {
   123  				Value: "48h",
   124  				Help:  "48 hours",
   125  			}},
   126  		}, {
   127  			Name: "chunk_total_size",
   128  			Help: `The total size that the chunks can take up on the local disk.
   129  
   130  If the cache exceeds this value then it will start to delete the
   131  oldest chunks until it goes under this value.`,
   132  			Default: DefCacheTotalChunkSize,
   133  			Examples: []fs.OptionExample{{
   134  				Value: "500M",
   135  				Help:  "500 MB",
   136  			}, {
   137  				Value: "1G",
   138  				Help:  "1 GB",
   139  			}, {
   140  				Value: "10G",
   141  				Help:  "10 GB",
   142  			}},
   143  		}, {
   144  			Name:     "db_path",
   145  			Default:  filepath.Join(config.CacheDir, "cache-backend"),
   146  			Help:     "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
   147  			Advanced: true,
   148  		}, {
   149  			Name:    "chunk_path",
   150  			Default: filepath.Join(config.CacheDir, "cache-backend"),
   151  			Help: `Directory to cache chunk files.
   152  
   153  Path to where partial file data (chunks) are stored locally. The remote
   154  name is appended to the final path.
   155  
   156  This config follows the "--cache-db-path". If you specify a custom
   157  location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
   158  then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
   159  			Advanced: true,
   160  		}, {
   161  			Name:     "db_purge",
   162  			Default:  false,
   163  			Help:     "Clear all the cached data for this remote on start.",
   164  			Hide:     fs.OptionHideConfigurator,
   165  			Advanced: true,
   166  		}, {
   167  			Name:    "chunk_clean_interval",
   168  			Default: DefCacheChunkCleanInterval,
   169  			Help: `How often should the cache perform cleanups of the chunk storage.
   170  The default value should be ok for most people. If you find that the
   171  cache goes over "cache-chunk-total-size" too often then try to lower
   172  this value to force it to perform cleanups more often.`,
   173  			Advanced: true,
   174  		}, {
   175  			Name:    "read_retries",
   176  			Default: DefCacheReadRetries,
   177  			Help: `How many times to retry a read from a cache storage.
   178  
   179  Since reading from a cache stream is independent from downloading file
   180  data, readers can get to a point where there's no more data in the
   181  cache.  Most of the time this indicates a connectivity issue: the
   182  cache isn't able to provide file data anymore.
   183  
   184  For really slow connections, increase this to a point where the stream is
   185  able to provide data, but expect the stream to stutter.`,
   186  			Advanced: true,
   187  		}, {
   188  			Name:    "workers",
   189  			Default: DefCacheTotalWorkers,
   190  			Help: `How many workers should run in parallel to download chunks.
   191  
   192  Higher values will mean more parallel processing (better CPU needed)
   193  and more concurrent requests on the cloud provider.  This impacts
   194  several aspects, like the cloud provider API limits and the stress on
   195  the hardware that rclone runs on, but it also means that streams will
   196  be more fluid and data will be available much faster to readers.
   197  
   198  **Note**: If the optional Plex integration is enabled then this
   199  setting will adapt to the type of reading performed and the value
   200  specified here will be used as a maximum number of workers to use.`,
   201  			Advanced: true,
   202  		}, {
   203  			Name:    "chunk_no_memory",
   204  			Default: DefCacheChunkNoMemory,
   205  			Help: `Disable the in-memory cache for storing chunks during streaming.
   206  
   207  By default, cache will keep file data during streaming in RAM as well
   208  to provide it to readers as fast as possible.
   209  
   210  This transient data is evicted as soon as it is read and the number of
   211  chunks stored doesn't exceed the number of workers. However, depending
   212  on other settings like "cache-chunk-size" and "cache-workers" this footprint
   213  can increase if there are parallel streams too (multiple files being read
   214  at the same time).
   215  
   216  If the hardware permits it, keep the in-memory cache enabled for an
   217  overall better performance during streaming, or set this option to
   218  disable it if RAM is not available on the local machine.`,
   219  			Advanced: true,
   220  		}, {
   221  			Name:    "rps",
   222  			Default: int(DefCacheRps),
   223  			Help: `Limits the number of requests per second to the source FS (-1 to disable)
   224  
   225  This setting places a hard limit on the number of requests per second
   226  that cache will make to the cloud provider remote and tries to
   227  respect that value by adding waits between reads.
   228  
   229  If you find that you're getting banned or limited on the cloud
   230  provider through cache and know that a smaller number of requests per
   231  second will allow you to work with it then you can use this setting
   232  for that.
   233  
   234  A good balance of all the other settings should make this setting
   235  unnecessary, but it is available for more special cases.
   236  
   237  **NOTE**: This will limit the number of requests during streams but
   238  other API calls to the cloud provider like directory listings will
   239  still pass.`,
   240  			Advanced: true,
   241  		}, {
   242  			Name:    "writes",
   243  			Default: DefCacheWrites,
   244  			Help: `Cache file data on writes through the FS
   245  
   246  If you need to read files immediately after you upload them through
   247  cache you can enable this flag to have their data stored in the
   248  cache store at the same time during upload.`,
   249  			Advanced: true,
   250  		}, {
   251  			Name:    "tmp_upload_path",
   252  			Default: "",
   253  			Help: `Directory to keep temporary files until they are uploaded.
   254  
   255  This is the path that cache will use as temporary storage for new
   256  files that need to be uploaded to the cloud provider.
   257  
   258  Specifying a value will enable this feature. Without it, the feature
   259  is completely disabled and files will be uploaded directly to the
   260  cloud provider.`,
   261  			Advanced: true,
   262  		}, {
   263  			Name:    "tmp_wait_time",
   264  			Default: DefCacheTmpWaitTime,
   265  			Help: `How long should files be stored in local cache before being uploaded
   266  
   267  This is the duration that a file must wait in the temporary location
   268  _cache-tmp-upload-path_ before it is selected for upload.
   269  
   270  Note that only one file is uploaded at a time and it can take longer
   271  to start the upload if a queue has formed for this purpose.`,
   272  			Advanced: true,
   273  		}, {
   274  			Name:    "db_wait_time",
   275  			Default: DefCacheDbWaitTime,
   276  			Help: `How long to wait for the DB to be available - 0 is unlimited
   277  
   278  Only one process can have the DB open at any one time, so rclone waits
   279  for this duration for the DB to become available before it gives an
   280  error.
   281  
   282  If you set it to 0 then it will wait forever.`,
   283  			Advanced: true,
   284  		}},
   285  	})
   286  }
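        // For reference, a minimal config section that the options above map to
        // (remote names are illustrative):
        //
        //	[mycache]
        //	type = cache
        //	remote = myremote:path/to/dir
        //	chunk_size = 5M
        //	info_age = 6h
        //	chunk_total_size = 10G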
   287  
   288  // Options defines the configuration for this backend
   289  type Options struct {
   290  	Remote             string        `config:"remote"`
   291  	PlexURL            string        `config:"plex_url"`
   292  	PlexUsername       string        `config:"plex_username"`
   293  	PlexPassword       string        `config:"plex_password"`
   294  	PlexToken          string        `config:"plex_token"`
   295  	PlexInsecure       bool          `config:"plex_insecure"`
   296  	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
   297  	InfoAge            fs.Duration   `config:"info_age"`
   298  	ChunkTotalSize     fs.SizeSuffix `config:"chunk_total_size"`
   299  	DbPath             string        `config:"db_path"`
   300  	ChunkPath          string        `config:"chunk_path"`
   301  	DbPurge            bool          `config:"db_purge"`
   302  	ChunkCleanInterval fs.Duration   `config:"chunk_clean_interval"`
   303  	ReadRetries        int           `config:"read_retries"`
   304  	TotalWorkers       int           `config:"workers"`
   305  	ChunkNoMemory      bool          `config:"chunk_no_memory"`
   306  	Rps                int           `config:"rps"`
   307  	StoreWrites        bool          `config:"writes"`
   308  	TempWritePath      string        `config:"tmp_upload_path"`
   309  	TempWaitTime       fs.Duration   `config:"tmp_wait_time"`
   310  	DbWaitTime         fs.Duration   `config:"db_wait_time"`
   311  }
   312  
   313  // Fs represents a wrapped fs.Fs
   314  type Fs struct {
   315  	fs.Fs
   316  	wrapper fs.Fs
   317  
   318  	name     string
   319  	root     string
   320  	opt      Options      // parsed options
   321  	features *fs.Features // optional features
   322  	cache    *Persistent
   323  	tempFs   fs.Fs
   324  
   325  	lastChunkCleanup time.Time
   326  	cleanupMu        sync.Mutex
   327  	rateLimiter      *rate.Limiter
   328  	plexConnector    *plexConnector
   329  	backgroundRunner *backgroundWriter
   330  	cleanupChan      chan bool
   331  	parentsForgetFn  []func(string, fs.EntryType)
   332  	notifiedRemotes  map[string]bool
   333  	notifiedMu       sync.Mutex
   334  	parentsForgetMu  sync.Mutex
   335  }
   336  
   337  // parseRootPath returns the root path with leading and trailing "/" trimmed, eg "/media/movies/" becomes "media/movies" (it currently never returns an error)
   338  func parseRootPath(path string) (string, error) {
   339  	return strings.Trim(path, "/"), nil
   340  }
   341  
   342  // NewFs constructs an Fs from the path, container:path
   343  func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
   344  	// Parse config into Options struct
   345  	opt := new(Options)
   346  	err := configstruct.Set(m, opt)
   347  	if err != nil {
   348  		return nil, err
   349  	}
   350  	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
   351  		return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
   352  			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
   353  	}
   354  
   355  	if strings.HasPrefix(opt.Remote, name+":") {
   356  		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
   357  	}
   358  
   359  	rpath, err := parseRootPath(rootPath)
   360  	if err != nil {
   361  		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
   362  	}
   363  
   364  	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
   365  	if err != nil {
   366  		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
   367  	}
   368  
   369  	remotePath := fspath.JoinRootPath(wPath, rootPath)
   370  	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
   371  	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
   372  		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
   373  	}
   374  	var fsErr error
   375  	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
   376  	if wrapErr == fs.ErrorIsFile {
   377  		fsErr = fs.ErrorIsFile
   378  		rpath = cleanPath(path.Dir(rpath))
   379  	}
   380  	// configure cache backend
   381  	if opt.DbPurge {
   382  		fs.Debugf(name, "Purging the DB")
   383  	}
   384  	f := &Fs{
   385  		Fs:               wrappedFs,
   386  		name:             name,
   387  		root:             rpath,
   388  		opt:              *opt,
   389  		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
   390  		cleanupChan:      make(chan bool, 1),
   391  		notifiedRemotes:  make(map[string]bool),
   392  	}
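        	// admit up to opt.Rps requests per second to the source FS, with a
        	// burst of up to TotalWorkers requests; per the "rps" option help,
        	// the default of -1 disables the limit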
   393  	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
   394  
   395  	f.plexConnector = &plexConnector{}
   396  	if opt.PlexURL != "" {
   397  		if opt.PlexToken != "" {
   398  			f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
   399  			if err != nil {
   400  				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
   401  			}
   402  		} else {
   403  			if opt.PlexPassword != "" && opt.PlexUsername != "" {
   404  				decPass, err := obscure.Reveal(opt.PlexPassword)
   405  				if err != nil {
   406  					decPass = opt.PlexPassword
   407  				}
   408  				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
   409  					m.Set("plex_token", token)
   410  				})
   411  				if err != nil {
   412  					return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
   413  				}
   414  			}
   415  		}
   416  	}
   417  
   418  	dbPath := f.opt.DbPath
   419  	chunkPath := f.opt.ChunkPath
   420  	// if the dbPath is non-default but the chunk path is default, make the chunk path follow the dbPath
   421  	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
   422  		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
   423  		chunkPath = dbPath
   424  	}
   425  	if filepath.Ext(dbPath) != "" {
   426  		dbPath = filepath.Dir(dbPath)
   427  	}
   428  	if filepath.Ext(chunkPath) != "" {
   429  		chunkPath = filepath.Dir(chunkPath)
   430  	}
   431  	err = os.MkdirAll(dbPath, os.ModePerm)
   432  	if err != nil {
   433  		return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
   434  	}
   435  	err = os.MkdirAll(chunkPath, os.ModePerm)
   436  	if err != nil {
   437  		return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
   438  	}
   439  
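        	// final on-disk layout (with the default paths), eg:
        	//   <CacheDir>/cache-backend/<name>.db - file structure metadata DB
        	//   <CacheDir>/cache-backend/<name>/   - locally cached chunks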
   440  	dbPath = filepath.Join(dbPath, name+".db")
   441  	chunkPath = filepath.Join(chunkPath, name)
   442  	fs.Infof(name, "Cache DB path: %v", dbPath)
   443  	fs.Infof(name, "Cache chunk path: %v", chunkPath)
   444  	f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
   445  		PurgeDb:    opt.DbPurge,
   446  		DbWaitTime: time.Duration(opt.DbWaitTime),
   447  	})
   448  	if err != nil {
   449  		return nil, errors.Wrap(err, "failed to start cache db")
   450  	}
   451  	// Trap SIGHUP to flush the directory cache on demand
   452  	c := make(chan os.Signal, 1)
   453  	signal.Notify(c, syscall.SIGHUP)
   454  	atexit.Register(func() {
   455  		if opt.PlexURL != "" {
   456  			f.plexConnector.closeWebsocket()
   457  		}
   458  		f.StopBackgroundRunners()
   459  	})
   460  	go func() {
   461  		for {
   462  			s := <-c
   463  			if s == syscall.SIGHUP {
   464  				fs.Infof(f, "Clearing cache from signal")
   465  				f.DirCacheFlush()
   466  			}
   467  		}
   468  	}()
   469  
   470  	fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
   471  	fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
   472  	fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
   473  	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
   474  	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
   475  	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
   476  	if f.opt.StoreWrites {
   477  		fs.Infof(name, "Cache Writes: enabled")
   478  	}
   479  
   480  	if f.opt.TempWritePath != "" {
   481  		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
   482  		if err != nil {
   483  			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
   484  		}
   485  		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
   486  		f.tempFs, err = cache.Get(f.opt.TempWritePath)
   487  		if err != nil {
   488  			return nil, errors.Wrap(err, "failed to create temp fs")
   489  		}
   490  		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
   491  		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
   492  		f.backgroundRunner, _ = initBackgroundUploader(f)
   493  		go f.backgroundRunner.run()
   494  	}
   495  
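        	// clean the chunk storage every ChunkCleanInterval until a message
        	// on cleanupChan stops the loop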
   496  	go func() {
   497  		for {
   498  			time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
   499  			select {
   500  			case <-f.cleanupChan:
   501  				fs.Infof(f, "stopping cleanup")
   502  				return
   503  			default:
   504  				fs.Debugf(f, "starting cleanup")
   505  				f.CleanUpCache(false)
   506  			}
   507  		}
   508  	}()
   509  
   510  	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
   511  		pollInterval := make(chan time.Duration, 1)
   512  		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
   513  		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
   514  	}
   515  
   516  	f.features = (&fs.Features{
   517  		CanHaveEmptyDirectories: true,
   518  		DuplicateFiles:          false, // storage doesn't permit this
   519  	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
   520  	// override only those features that use a temp fs and it doesn't support them
   521  	//f.features.ChangeNotify = f.ChangeNotify
   522  	if f.opt.TempWritePath != "" {
   523  		if f.tempFs.Features().Move == nil {
   524  			f.features.Move = nil
   525  		}
   529  		if f.tempFs.Features().DirMove == nil {
   530  			f.features.DirMove = nil
   531  		}
   532  		if f.tempFs.Features().MergeDirs == nil {
   533  			f.features.MergeDirs = nil
   534  		}
   535  	}
   536  	// even if the wrapped fs doesn't support it, we still want it
   537  	f.features.DirCacheFlush = f.DirCacheFlush
   538  
   539  	rc.Add(rc.Call{
   540  		Path:  "cache/expire",
   541  		Fn:    f.httpExpireRemote,
   542  		Title: "Purge a remote from cache",
   543  		Help: `
   544  Purge a remote from the cache backend. Supports either a directory or a file.
   545  Params:
   546    - remote = path to remote (required)
   547    - withData = true/false to delete cached data (chunks) as well (optional)
   548  
   549  Eg
   550  
   551      rclone rc cache/expire remote=path/to/sub/folder/
   552      rclone rc cache/expire remote=/ withData=true 
   553  `,
   554  	})
   555  
   556  	rc.Add(rc.Call{
   557  		Path:  "cache/stats",
   558  		Fn:    f.httpStats,
   559  		Title: "Get cache stats",
   560  		Help: `
   561  Show statistics for the cache remote.
   562  `,
   563  	})
   564  
   565  	rc.Add(rc.Call{
   566  		Path:  "cache/fetch",
   567  		Fn:    f.rcFetch,
   568  		Title: "Fetch file chunks",
   569  		Help: `
   570  Ensure the specified file chunks are cached on disk.
   571  
   572  The chunks= parameter specifies the file chunks to check.
   573  It takes a comma separated list of array slice indices.
   574  The slice indices are similar to Python slices: start[:end]
   575  
   576  start is the 0 based chunk number from the beginning of the file
   577  to fetch inclusive. end is the 0 based chunk number from the
   578  beginning of the file to fetch exclusive.
   579  Both values can be negative, in which case they count from the back
   580  of the file. The value "-5:" represents the last 5 chunks of a file.
   581  
   582  Some valid examples are:
   583  ":5,-5:" -> the first and last five chunks
   584  "0,-2" -> the first and the second last chunk
   585  "0:10" -> the first ten chunks
   586  
   587  Any parameter with a key that starts with "file" can be used to
   588  specify files to fetch, eg
   589  
   590      rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
   591  
   592  File names will automatically be encrypted when a crypt remote
   593  is used on top of the cache.
   594  
   595  `,
   596  	})
   597  
   598  	return f, fsErr
   599  }
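        // The rc calls registered above can be driven from the command line,
        // eg (paths are illustrative):
        //
        //	rclone rc cache/stats
        //	rclone rc cache/expire remote=path/to/sub/folder/ withData=true
        //	rclone rc cache/fetch chunks=:5,-5: file=path/to/file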
   600  
   601  func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
   602  	out = make(rc.Params)
   603  	m, err := f.Stats()
   604  	if err != nil {
   605  		return out, errors.Wrap(err, "error while getting cache stats")
   606  	}
   607  	out["status"] = "ok"
   608  	out["stats"] = m
   609  	return out, nil
   610  }
   611  
   612  func (f *Fs) unwrapRemote(remote string) string {
   613  	remote = cleanPath(remote)
   614  	if remote != "" {
   615  		// if it's wrapped by crypt we need to check what format we got
   616  		if cryptFs, yes := f.isWrappedByCrypt(); yes {
   617  			_, err := cryptFs.DecryptFileName(remote)
   618  			// if it failed to decrypt then it is a decrypted format and we need to encrypt it
   619  			if err != nil {
   620  				return cryptFs.EncryptFileName(remote)
   621  			}
   622  			// else it's an encrypted format and we can use it as it is
   623  		}
   624  	}
   625  	return remote
   626  }
   627  
   628  func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
   629  	out = make(rc.Params)
   630  	remoteInt, ok := in["remote"]
   631  	if !ok {
   632  		return out, errors.New("remote is needed")
   633  	}
   634  	remote := remoteInt.(string)
   635  	withData := false
   636  	_, ok = in["withData"]
   637  	if ok {
   638  		withData = true
   639  	}
   640  
   641  	remote = f.unwrapRemote(remote)
   642  	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
   643  		return out, errors.Errorf("%s doesn't exist in cache", remote)
   644  	}
   645  
   646  	co := NewObject(f, remote)
   647  	err = f.cache.GetObject(co)
   648  	if err != nil { // it could be a dir
   649  		cd := NewDirectory(f, remote)
   650  		err := f.cache.ExpireDir(cd)
   651  		if err != nil {
   652  			return out, errors.WithMessage(err, "error expiring directory")
   653  		}
   654  		// notify vfs too
   655  		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
   656  		out["status"] = "ok"
   657  		out["message"] = fmt.Sprintf("cached directory cleared: %v", remote)
   658  		return out, nil
   659  	}
   660  	// expire the entry
   661  	err = f.cache.ExpireObject(co, withData)
   662  	if err != nil {
   663  		return out, errors.WithMessage(err, "error expiring file")
   664  	}
   665  	// notify vfs too
   666  	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
   667  
   668  	out["status"] = "ok"
   669  	out["message"] = fmt.Sprintf("cached file cleared: %v", remote)
   670  	return out, nil
   671  }
   672  
   673  func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
   674  	type chunkRange struct {
   675  		start, end int64
   676  	}
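        	// parseChunks parses a comma separated list of "start[:end]" specs
        	// (Python-like slices, as described in the cache/fetch help above);
        	// a bare index selects a single chunk and negative values count
        	// from the end of the file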
   677  	parseChunks := func(ranges string) (crs []chunkRange, err error) {
   678  		for _, part := range strings.Split(ranges, ",") {
   679  			var start, end int64 = 0, math.MaxInt64
   680  			switch ints := strings.Split(part, ":"); len(ints) {
   681  			case 1:
   682  				start, err = strconv.ParseInt(ints[0], 10, 64)
   683  				if err != nil {
   684  					return nil, errors.Errorf("invalid range: %q", part)
   685  				}
   686  				end = start + 1
   687  			case 2:
   688  				if ints[0] != "" {
   689  					start, err = strconv.ParseInt(ints[0], 10, 64)
   690  					if err != nil {
   691  						return nil, errors.Errorf("invalid range: %q", part)
   692  					}
   693  				}
   694  				if ints[1] != "" {
   695  					end, err = strconv.ParseInt(ints[1], 10, 64)
   696  					if err != nil {
   697  						return nil, errors.Errorf("invalid range: %q", part)
   698  					}
   699  				}
   700  			default:
   701  				return nil, errors.Errorf("invalid range: %q", part)
   702  			}
   703  			crs = append(crs, chunkRange{start: start, end: end})
   704  		}
   705  		return
   706  	}
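        	// walkChunkRange resolves negative start/end values against the
        	// file's chunk count, clamps the range to the available chunks and
        	// calls cb once per selected chunk index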
   707  	walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
   708  		if size <= 0 {
   709  			return
   710  		}
   711  		chunks := (size-1)/f.ChunkSize() + 1
   712  
   713  		start, end := cr.start, cr.end
   714  		if start < 0 {
   715  			start += chunks
   716  		}
   717  		if end <= 0 {
   718  			end += chunks
   719  		}
   720  		if end <= start {
   721  			return
   722  		}
   723  		switch {
   724  		case start < 0:
   725  			start = 0
   726  		case start >= chunks:
   727  			return
   728  		}
   729  		switch {
   730  		case end <= start:
   731  			end = start + 1
   732  		case end >= chunks:
   733  			end = chunks
   734  		}
   735  		for i := start; i < end; i++ {
   736  			cb(i)
   737  		}
   738  	}
   739  	walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
   740  		for _, cr := range crs {
   741  			walkChunkRange(cr, size, cb)
   742  		}
   743  	}
   744  
   745  	v, ok := in["chunks"]
   746  	if !ok {
   747  		return nil, errors.New("missing chunks parameter")
   748  	}
   749  	s, ok := v.(string)
   750  	if !ok {
   751  		return nil, errors.New("invalid chunks parameter")
   752  	}
   753  	delete(in, "chunks")
   754  	crs, err := parseChunks(s)
   755  	if err != nil {
   756  		return nil, errors.Wrap(err, "invalid chunks parameter")
   757  	}
   758  	var files [][2]string
   759  	for k, v := range in {
   760  		if !strings.HasPrefix(k, "file") {
   761  			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
   762  		}
   763  		switch v := v.(type) {
   764  		case string:
   765  			files = append(files, [2]string{v, f.unwrapRemote(v)})
   766  		default:
   767  			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
   768  		}
   769  	}
   770  	type fileStatus struct {
   771  		Error         string
   772  		FetchedChunks int
   773  	}
   774  	fetchedChunks := make(map[string]fileStatus, len(files))
   775  	for _, pair := range files {
   776  		file, remote := pair[0], pair[1]
   777  		var status fileStatus
   778  		o, err := f.NewObject(ctx, remote)
   779  		if err != nil {
   780  			fetchedChunks[file] = fileStatus{Error: err.Error()}
   781  			continue
   782  		}
   783  		co := o.(*Object)
   784  		err = co.refreshFromSource(ctx, true)
   785  		if err != nil {
   786  			fetchedChunks[file] = fileStatus{Error: err.Error()}
   787  			continue
   788  		}
   789  		handle := NewObjectHandle(ctx, co, f)
   790  		handle.UseMemory = false
   791  		handle.scaleWorkers(1)
   792  		walkChunkRanges(crs, co.Size(), func(chunk int64) {
   793  			_, err := handle.getChunk(chunk * f.ChunkSize())
   794  			if err != nil {
   795  				if status.Error == "" {
   796  					status.Error = err.Error()
   797  				}
   798  			} else {
   799  				status.FetchedChunks++
   800  			}
   801  		})
   802  		fetchedChunks[file] = status
   803  	}
   804  
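        	// on success the returned status maps each requested file to its
        	// result, eg (values are illustrative):
        	//   {"status": {"path/to/file": {"Error": "", "FetchedChunks": 5}}}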
   805  	return rc.Params{"status": fetchedChunks}, nil
   806  }
   807  
   808  // receiveChangeNotify is a wrapper for notifications sent by the wrapped FS about changed files
   809  func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
   810  	if crypt, yes := f.isWrappedByCrypt(); yes {
   811  		decryptedPath, err := crypt.DecryptFileName(forgetPath)
   812  		if err == nil {
   813  			fs.Infof(decryptedPath, "received cache expiry notification")
   814  		} else {
   815  			fs.Infof(forgetPath, "received cache expiry notification")
   816  		}
   817  	} else {
   818  		fs.Infof(forgetPath, "received cache expiry notification")
   819  	}
   820  	// notify upstreams too (vfs)
   821  	f.notifyChangeUpstream(forgetPath, entryType)
   822  
   823  	var cd *Directory
   824  	if entryType == fs.EntryObject {
   825  		co := NewObject(f, forgetPath)
   826  		err := f.cache.GetObject(co)
   827  		if err != nil {
   828  			fs.Debugf(f, "got change notification for non cached entry %v", co)
   829  		}
   830  		err = f.cache.ExpireObject(co, true)
   831  		if err != nil {
   832  			fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err)
   833  		}
   834  		cd = NewDirectory(f, cleanPath(path.Dir(co.Remote())))
   835  	} else {
   836  		cd = NewDirectory(f, forgetPath)
   837  	}
   838  	// we expire the dir
   839  	err := f.cache.ExpireDir(cd)
   840  	if err != nil {
   841  		fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err)
   842  	} else {
   843  		fs.Debugf(forgetPath, "notify: expired '%v'", cd)
   844  	}
   845  
   846  	f.notifiedMu.Lock()
   847  	defer f.notifiedMu.Unlock()
   848  	f.notifiedRemotes[forgetPath] = true
   849  	f.notifiedRemotes[cd.Remote()] = true
   850  }
   851  
   852  // notifyChangeUpstreamIfNeeded notifies the upstreams when the wrapped remote
   853  // doesn't notify on changes itself or when we use a temp fs
   854  func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
   855  	if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
   856  		f.notifyChangeUpstream(remote, entryType)
   857  	}
   858  }
   859  
   860  // notifyChangeUpstream will loop through all the upstreams and notify
   861  // of the provided remote (should be only a dir)
   862  func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
   863  	f.parentsForgetMu.Lock()
   864  	defer f.parentsForgetMu.Unlock()
   865  	for _, fn := range f.parentsForgetFn {
   866  		fn(remote, entryType)
   867  	}
   870  }
   871  
   872  // ChangeNotify can subscribe multiple callers.
   873  // It is coupled with the wrapped fs ChangeNotify (if it supports it)
   874  // and also notifies other caches (i.e. VFS) to clear out whenever something changes
   875  func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
   876  	f.parentsForgetMu.Lock()
   877  	defer f.parentsForgetMu.Unlock()
   878  	fs.Debugf(f, "subscribing to ChangeNotify")
   879  	f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
   880  	go func() {
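        		// drain pollInterval so the sender never blocks; polling of the
        		// wrapped fs is set up in NewFs instead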
   881  		for range pollInterval {
   882  		}
   883  	}()
   884  }
   885  
   886  // Name of the remote (as passed into NewFs)
   887  func (f *Fs) Name() string {
   888  	return f.name
   889  }
   890  
   891  // Root of the remote (as passed into NewFs)
   892  func (f *Fs) Root() string {
   893  	return f.root
   894  }
   895  
   896  // Features returns the optional features of this Fs
   897  func (f *Fs) Features() *fs.Features {
   898  	return f.features
   899  }
   900  
   901  // String returns a description of the FS
   902  func (f *Fs) String() string {
   903  	return fmt.Sprintf("Cache remote %s:%s", f.name, f.root)
   904  }
   905  
   906  // ChunkSize returns the configured chunk size
   907  func (f *Fs) ChunkSize() int64 {
   908  	return int64(f.opt.ChunkSize)
   909  }
   910  
   911  // InfoAge returns the configured file age
   912  func (f *Fs) InfoAge() time.Duration {
   913  	return time.Duration(f.opt.InfoAge)
   914  }
   915  
   916  // TempUploadWaitTime returns the configured temp file upload wait time
   917  func (f *Fs) TempUploadWaitTime() time.Duration {
   918  	return time.Duration(f.opt.TempWaitTime)
   919  }
   920  
   921  // NewObject finds the Object at remote.
   922  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   923  	var err error
   924  
   925  	fs.Debugf(f, "new object '%s'", remote)
   926  	co := NewObject(f, remote)
   927  	// search for entry in cache and validate it
   928  	err = f.cache.GetObject(co)
   929  	if err != nil {
   930  		fs.Debugf(remote, "find: error: %v", err)
   931  	} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
   932  		fs.Debugf(co, "find: cold object: %+v", co)
   933  	} else {
   934  		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
   935  		return co, nil
   936  	}
   937  
   938  	// search for entry in source or temp fs
   939  	var obj fs.Object
   940  	if f.opt.TempWritePath != "" {
   941  		obj, err = f.tempFs.NewObject(ctx, remote)
   942  		// not found in temp fs
   943  		if err != nil {
   944  			fs.Debugf(remote, "find: not found in local cache fs")
   945  			obj, err = f.Fs.NewObject(ctx, remote)
   946  		} else {
   947  			fs.Debugf(obj, "find: found in local cache fs")
   948  		}
   949  	} else {
   950  		obj, err = f.Fs.NewObject(ctx, remote)
   951  	}
   952  
   953  	// not found in either fs
   954  	if err != nil {
   955  		fs.Debugf(obj, "find failed: not found in either local or remote fs")
   956  		return nil, err
   957  	}
   958  
   959  	// cache the new entry
   960  	co = ObjectFromOriginal(ctx, f, obj).persist()
   961  	fs.Debugf(co, "find: cached object")
   962  	return co, nil
   963  }
   964  
   965  // List the objects and directories in dir into entries
   966  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   967  	fs.Debugf(f, "list '%s'", dir)
   968  	cd := ShallowDirectory(f, dir)
   969  
   970  	// search for cached dir entries and validate them
   971  	entries, err = f.cache.GetDirEntries(cd)
   972  	if err != nil {
   973  		fs.Debugf(dir, "list: error: %v", err)
   974  	} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
   975  		fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
   976  	} else if len(entries) == 0 {
   977  		// TODO: read empty dirs from source?
   978  		fs.Debugf(dir, "list: empty listing")
   979  	} else {
   980  		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
   981  		fs.Debugf(dir, "list: cached entries: %v", entries)
   982  		return entries, nil
   983  	}
   984  
   985  	// we first search any temporary files stored locally
   986  	var cachedEntries fs.DirEntries
   987  	if f.opt.TempWritePath != "" {
   988  		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
   989  		if err != nil {
   990  			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
   991  		} else {
   992  			fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries))
   993  			fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
   994  
   995  			for _, queuedRemote := range queuedEntries {
   996  				queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
   997  				if err != nil {
   998  					fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
   999  					continue
  1000  				}
  1001  				co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
  1002  				fs.Debugf(co, "list: cached temp object")
  1003  				cachedEntries = append(cachedEntries, co)
  1004  			}
  1005  		}
  1006  	}
  1007  
  1008  	// search from the source
  1009  	sourceEntries, err := f.Fs.List(ctx, dir)
  1010  	if err != nil {
  1011  		return nil, err
  1012  	}
  1013  	fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
  1014  	fs.Debugf(dir, "list: source entries: %v", sourceEntries)
  1015  
  1016  	sort.Sort(sourceEntries)
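        	// drop cached entries that are no longer present in the source listing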
  1017  	for _, entry := range entries {
  1018  		entryRemote := entry.Remote()
  1019  		i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
  1020  		if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
  1021  			continue
  1022  		}
  1023  		fp := path.Join(f.Root(), entryRemote)
  1024  		switch entry.(type) {
  1025  		case fs.Object:
  1026  			_ = f.cache.RemoveObject(fp)
  1027  		case fs.Directory:
  1028  			_ = f.cache.RemoveDir(fp)
  1029  		}
  1030  		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
  1031  	}
  1032  	entries = nil
  1033  
  1034  	// and then iterate over the ones from source (temp Objects will override source ones)
  1035  	var batchDirectories []*Directory
  1036  	sort.Sort(cachedEntries)
  1037  	tmpCnt := len(cachedEntries)
  1038  	for _, entry := range sourceEntries {
  1039  		switch o := entry.(type) {
  1040  		case fs.Object:
  1041  			// skip over temporary objects (might be uploading)
  1042  			oRemote := o.Remote()
  1043  			i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
  1044  			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
  1045  				continue
  1046  			}
  1047  			co := ObjectFromOriginal(ctx, f, o).persist()
  1048  			cachedEntries = append(cachedEntries, co)
  1049  			fs.Debugf(dir, "list: cached object: %v", co)
  1050  		case fs.Directory:
  1051  			cdd := DirectoryFromOriginal(ctx, f, o)
  1052  			// add the dir to the cache batch if it isn't stored yet or its cached entry hasn't expired
  1053  			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
  1054  				batchDirectories = append(batchDirectories, cdd)
  1055  			}
  1056  			cachedEntries = append(cachedEntries, cdd)
  1057  		default:
  1058  			fs.Debugf(entry, "list: Unknown object type %T", entry)
  1059  		}
  1060  	}
  1061  	err = f.cache.AddBatchDir(batchDirectories)
  1062  	if err != nil {
  1063  		fs.Errorf(dir, "list: error caching directories from listing %v", dir)
  1064  	} else {
  1065  		fs.Debugf(dir, "list: cached directories: %v", len(batchDirectories))
  1066  	}
  1067  
  1068  	// cache dir meta
  1069  	t := time.Now()
  1070  	cd.CacheTs = &t
  1071  	err = f.cache.AddDir(cd)
  1072  	if err != nil {
  1073  		fs.Errorf(cd, "list: save error: '%v'", err)
  1074  	} else {
  1075  		fs.Debugf(dir, "list: cached dir: '%v', cache ts: %v", cd.abs(), cd.CacheTs)
  1076  	}
  1077  
  1078  	return cachedEntries, nil
  1079  }
  1080  
  1081  func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
  1082  	entries, err := f.List(ctx, dir)
  1083  	if err != nil {
  1084  		return err
  1085  	}
  1086  
  1087  	for i := 0; i < len(entries); i++ {
  1088  		innerDir, ok := entries[i].(fs.Directory)
  1089  		if ok {
  1090  			err := f.recurse(ctx, innerDir.Remote(), list)
  1091  			if err != nil {
  1092  				return err
  1093  			}
  1094  		}
  1095  
  1096  		err := list.Add(entries[i])
  1097  		if err != nil {
  1098  			return err
  1099  		}
  1100  	}
  1101  
  1102  	return nil
  1103  }
  1104  
  1105  // ListR lists the objects and directories of the Fs starting
  1106  // from dir recursively into out.
  1107  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  1108  	fs.Debugf(f, "list recursively from '%s'", dir)
  1109  
  1110  	// we check if the source FS supports ListR
  1111  	// if it does, we'll use that to get all the entries, cache them and return
  1112  	do := f.Fs.Features().ListR
  1113  	if do != nil {
  1114  		return do(ctx, dir, func(entries fs.DirEntries) error {
  1115  			// we got called back with a set of entries so let's cache them and call the original callback
  1116  			for _, entry := range entries {
  1117  				switch o := entry.(type) {
  1118  				case fs.Object:
  1119  					_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
  1120  				case fs.Directory:
  1121  					_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
  1122  				default:
  1123  					return errors.Errorf("Unknown object type %T", entry)
  1124  				}
  1125  			}
  1126  
  1127  			// call the original callback
  1128  			return callback(entries)
  1129  		})
  1130  	}
  1131  
  1132  	// if we're here, we'll do a standard recursive traversal and cache everything
  1133  	list := walk.NewListRHelper(callback)
  1134  	err = f.recurse(ctx, dir, list)
  1135  	if err != nil {
  1136  		return err
  1137  	}
  1138  
  1139  	return list.Flush()
  1140  }
  1141  
  1142  // Mkdir makes the directory (container, bucket)
  1143  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1144  	fs.Debugf(f, "mkdir '%s'", dir)
  1145  	err := f.Fs.Mkdir(ctx, dir)
  1146  	if err != nil {
  1147  		return err
  1148  	}
  1149  	fs.Debugf(dir, "mkdir: created dir in source fs")
  1150  
  1151  	cd := NewDirectory(f, cleanPath(dir))
  1152  	err = f.cache.AddDir(cd)
  1153  	if err != nil {
  1154  		fs.Errorf(dir, "mkdir: add error: %v", err)
  1155  	} else {
  1156  		fs.Debugf(cd, "mkdir: added to cache")
  1157  	}
  1158  	// expire parent of new dir
  1159  	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
  1160  	err = f.cache.ExpireDir(parentCd)
  1161  	if err != nil {
  1162  		fs.Errorf(parentCd, "mkdir: cache expire error: %v", err)
  1163  	} else {
  1164  		fs.Infof(parentCd, "mkdir: cache expired")
  1165  	}
  1166  	// advertise to ChangeNotify if wrapped doesn't do that
  1167  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1168  
  1169  	return nil
  1170  }
  1171  
  1172  // Rmdir removes the directory (container, bucket) if empty
  1173  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1174  	fs.Debugf(f, "rmdir '%s'", dir)
  1175  
  1176  	if f.opt.TempWritePath != "" {
  1177  		// pause background uploads
  1178  		f.backgroundRunner.pause()
  1179  		defer f.backgroundRunner.play()
  1180  
  1181  		// we check if the dir exists on the remote and remove it there too if it does
  1182  		// otherwise, we skip this step
  1183  		_, err := f.UnWrap().List(ctx, dir)
  1184  		if err == nil {
  1185  			err := f.Fs.Rmdir(ctx, dir)
  1186  			if err != nil {
  1187  				return err
  1188  			}
  1189  			fs.Debugf(dir, "rmdir: removed dir in source fs")
  1190  		}
  1191  
  1192  		var queuedEntries []*Object
  1193  		err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
  1194  			for _, o := range entries {
  1195  				if oo, ok := o.(fs.Object); ok {
  1196  					co := ObjectFromOriginal(ctx, f, oo)
  1197  					queuedEntries = append(queuedEntries, co)
  1198  				}
  1199  			}
  1200  			return nil
  1201  		})
  1202  		if err != nil {
  1203  			fs.Errorf(dir, "rmdir: error getting pending uploads: %v", err)
  1204  		} else {
  1205  			fs.Debugf(dir, "rmdir: read %v from temp fs", len(queuedEntries))
  1206  			fs.Debugf(dir, "rmdir: temp fs entries: %v", queuedEntries)
  1207  			if len(queuedEntries) > 0 {
  1208  				fs.Errorf(dir, "rmdir: temporary dir not empty: %v", queuedEntries)
  1209  				return fs.ErrorDirectoryNotEmpty
  1210  			}
  1211  		}
  1212  	} else {
  1213  		err := f.Fs.Rmdir(ctx, dir)
  1214  		if err != nil {
  1215  			return err
  1216  		}
  1217  		fs.Debugf(dir, "rmdir: removed dir in source fs")
  1218  	}
  1219  
  1220  	// remove dir data
  1221  	d := NewDirectory(f, dir)
  1222  	err := f.cache.RemoveDir(d.abs())
  1223  	if err != nil {
  1224  		fs.Errorf(dir, "rmdir: remove error: %v", err)
  1225  	} else {
  1226  		fs.Debugf(d, "rmdir: removed from cache")
  1227  	}
  1228  	// expire parent
  1229  	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
  1230  	err = f.cache.ExpireDir(parentCd)
  1231  	if err != nil {
  1232  		fs.Errorf(dir, "rmdir: cache expire error: %v", err)
  1233  	} else {
  1234  		fs.Infof(parentCd, "rmdir: cache expired")
  1235  	}
  1236  	// advertise to ChangeNotify if wrapped doesn't do that
  1237  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1238  
  1239  	return nil
  1240  }
  1241  
  1242  // DirMove moves src, srcRemote to this remote at dstRemote
  1243  // using server side move operations.
  1244  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1245  	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
  1246  
  1247  	do := f.Fs.Features().DirMove
  1248  	if do == nil {
  1249  		return fs.ErrorCantDirMove
  1250  	}
  1251  	srcFs, ok := src.(*Fs)
  1252  	if !ok {
  1253  		fs.Errorf(srcFs, "can't move directory - not same remote type")
  1254  		return fs.ErrorCantDirMove
  1255  	}
  1256  	if srcFs.Fs.Name() != f.Fs.Name() {
  1257  		fs.Errorf(srcFs, "can't move directory - not wrapping same remotes")
  1258  		return fs.ErrorCantDirMove
  1259  	}
  1260  
  1261  	if f.opt.TempWritePath != "" {
  1262  		// pause background uploads
  1263  		f.backgroundRunner.pause()
  1264  		defer f.backgroundRunner.play()
  1265  
  1266  		_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
  1267  		_, errInTemp := f.tempFs.List(ctx, srcRemote)
  1268  		// not found in either fs
  1269  		if errInWrap != nil && errInTemp != nil {
  1270  			return fs.ErrorDirNotFound
  1271  		}
  1272  
  1273  		// we check if the source exists on the remote and make the same move on it too if it does
  1274  		// otherwise, we skip this step
  1275  		if errInWrap == nil {
  1276  			err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
  1277  			if err != nil {
  1278  				return err
  1279  			}
  1280  			fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
  1281  		}
  1282  		// we need to check if the directory exists in the temp fs
  1283  		// and skip the move if it doesn't
  1284  		if errInTemp != nil {
  1285  			goto cleanup
  1286  		}
  1287  
  1288  		var queuedEntries []*Object
  1289  		err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
  1290  			for _, o := range entries {
  1291  				if oo, ok := o.(fs.Object); ok {
  1292  					co := ObjectFromOriginal(ctx, f, oo)
  1293  					queuedEntries = append(queuedEntries, co)
  1294  					if co.tempFileStartedUpload() {
  1295  						fs.Errorf(co, "can't move - upload has already started and needs to finish first")
  1296  						return fs.ErrorCantDirMove
  1297  					}
  1298  				}
  1299  			}
  1300  			return nil
  1301  		})
  1302  		if err != nil {
  1303  			return err
  1304  		}
  1305  		fs.Debugf(srcRemote, "dirmove: read %v from temp fs", len(queuedEntries))
  1306  		fs.Debugf(srcRemote, "dirmove: temp fs entries: %v", queuedEntries)
  1307  
  1308  		do := f.tempFs.Features().DirMove
  1309  		if do == nil {
  1310  			fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
  1311  			return fs.ErrorCantDirMove
  1312  		}
  1313  		err = do(ctx, f.tempFs, srcRemote, dstRemote)
  1314  		if err != nil {
  1315  			return err
  1316  		}
  1317  		err = f.cache.ReconcileTempUploads(ctx, f)
  1318  		if err != nil {
  1319  			return err
  1320  		}
  1321  	} else {
  1322  		err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
  1323  		if err != nil {
  1324  			return err
  1325  		}
  1326  		fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
  1327  	}
  1328  cleanup:
  1329  
  1330  	// delete src dir from cache along with all chunks
  1331  	srcDir := NewDirectory(srcFs, srcRemote)
  1332  	err := f.cache.RemoveDir(srcDir.abs())
  1333  	if err != nil {
  1334  		fs.Errorf(srcDir, "dirmove: remove error: %v", err)
  1335  	} else {
  1336  		fs.Debugf(srcDir, "dirmove: removed cached dir")
  1337  	}
  1338  	// expire src parent
  1339  	srcParent := NewDirectory(f, cleanPath(path.Dir(srcRemote)))
  1340  	err = f.cache.ExpireDir(srcParent)
  1341  	if err != nil {
  1342  		fs.Errorf(srcParent, "dirmove: cache expire error: %v", err)
  1343  	} else {
  1344  		fs.Debugf(srcParent, "dirmove: cache expired")
  1345  	}
  1346  	// advertise to ChangeNotify if wrapped doesn't do that
  1347  	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)
  1348  
  1349  	// expire parent dir at the destination path
  1350  	dstParent := NewDirectory(f, cleanPath(path.Dir(dstRemote)))
  1351  	err = f.cache.ExpireDir(dstParent)
  1352  	if err != nil {
  1353  		fs.Errorf(dstParent, "dirmove: cache expire error: %v", err)
  1354  	} else {
  1355  		fs.Debugf(dstParent, "dirmove: cache expired")
  1356  	}
  1357  	// advertise to ChangeNotify if wrapped doesn't do that
  1358  	f.notifyChangeUpstreamIfNeeded(dstParent.Remote(), fs.EntryDirectory)
  1359  	// TODO: precache dst dir and save the chunks
  1360  
  1361  	return nil
  1362  }
  1363  
  1364  // cacheReader tees the stream from a reader so it is cached at the same time as it is read by the original consumer
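        //
        // data flow (sketch):
        //
        //	u (source) --TeeReader--> originalRead (the actual consumer)
        //	               \--> pipe --> caching goroutine --> f.cache.AddChunk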
  1365  func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn io.Reader)) {
  1366  	// create the pipe and tee reader
  1367  	pr, pw := io.Pipe()
  1368  	tr := io.TeeReader(u, pw)
  1369  
  1370  	// create channel to synchronize
  1371  	done := make(chan bool)
  1372  	defer close(done)
  1373  
  1374  	go func() {
  1375  		// notify the cache reader that we're complete after the source FS finishes
  1376  		defer func() {
  1377  			_ = pw.Close()
  1378  		}()
  1379  		// process original reading
  1380  		originalRead(tr)
  1381  		// signal complete
  1382  		done <- true
  1383  	}()
  1384  
  1385  	go func() {
  1386  		var offset int64
  1387  		for {
  1388  			chunk := make([]byte, f.opt.ChunkSize)
  1389  			readSize, err := io.ReadFull(pr, chunk)
  1390  			// we ignore 3 failures which are ok:
  1391  			// 1. EOF - original reading finished and we got a full buffer too
  1392  			// 2. ErrUnexpectedEOF - original reading finished and partial buffer
  1393  			// 3. ErrClosedPipe - source remote reader was closed (usually means it reached the end) and we need to stop too
  1394  			// if we have a different error: we're going to error out the original reading too and stop this
  1395  			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && err != io.ErrClosedPipe {
  1396  				fs.Errorf(src, "error saving new data in cache. offset: %v, err: %v", offset, err)
  1397  				_ = pr.CloseWithError(err)
  1398  				break
  1399  			}
  1400  			// if we have some bytes we cache them
  1401  			if readSize > 0 {
  1402  				chunk = chunk[:readSize]
  1403  				err2 := f.cache.AddChunk(cleanPath(path.Join(f.root, src.Remote())), chunk, offset)
  1404  				if err2 != nil {
  1405  					fs.Errorf(src, "error saving new data in cache '%v'", err2)
  1406  					_ = pr.CloseWithError(err2)
  1407  					break
  1408  				}
  1409  				offset += int64(readSize)
  1410  			}
  1411  			// stuff should be closed but let's be sure
  1412  			if err == io.EOF || err == io.ErrUnexpectedEOF || err == io.ErrClosedPipe {
  1413  				_ = pr.Close()
  1414  				break
  1415  			}
  1416  		}
  1417  
  1418  		// signal complete
  1419  		done <- true
  1420  	}()
  1421  
  1422  	// wait until both are done
  1423  	for c := 0; c < 2; c++ {
  1424  		<-done
  1425  	}
  1426  }
  1427  
  1428  type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
  1429  
  1430  // put in to the remote path
  1431  func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
  1432  	var err error
  1433  	var obj fs.Object
  1434  
  1435  	// queue for upload and store in temp fs if configured
  1436  	if f.opt.TempWritePath != "" {
  1437  		// we need to clear the caches before a put through temp fs
  1438  		parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1439  		_ = f.cache.ExpireDir(parentCd)
  1440  		f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1441  
  1442  		obj, err = f.tempFs.Put(ctx, in, src, options...)
  1443  		if err != nil {
  1444  			fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
  1445  			return nil, err
  1446  		}
  1447  		fs.Infof(obj, "put: uploaded in temp fs")
  1448  		err = f.cache.addPendingUpload(path.Join(f.Root(), src.Remote()), false)
  1449  		if err != nil {
  1450  			fs.Errorf(obj, "put: failed to queue for upload: %v", err)
  1451  			return nil, err
  1452  		}
  1453  		fs.Infof(obj, "put: queued for upload")
  1454  		// if cache writes are enabled, write it first through the cache
  1455  	} else if f.opt.StoreWrites {
  1456  		f.cacheReader(in, src, func(inn io.Reader) {
  1457  			obj, err = put(ctx, inn, src, options...)
  1458  		})
  1459  		if err == nil {
  1460  			fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
  1461  		}
  1462  		// last option: save it directly in remote fs
  1463  	} else {
  1464  		obj, err = put(ctx, in, src, options...)
  1465  		if err == nil {
  1466  			fs.Debugf(obj, "put: uploaded to remote fs")
  1467  		}
  1468  	}
  1469  	// validate and stop if errors are found
  1470  	if err != nil {
  1471  		fs.Errorf(src, "put: error uploading: %v", err)
  1472  		return nil, err
  1473  	}
  1474  
  1475  	// cache the new file
  1476  	cachedObj := ObjectFromOriginal(ctx, f, obj)
  1477  
  1478  	// deleting cached chunks and info to be replaced with new ones
  1479  	_ = f.cache.RemoveObject(cachedObj.abs())
  1480  
  1481  	cachedObj.persist()
  1482  	fs.Debugf(cachedObj, "put: added to cache")
  1483  
  1484  	// expire parent
  1485  	parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1486  	err = f.cache.ExpireDir(parentCd)
  1487  	if err != nil {
  1488  		fs.Errorf(cachedObj, "put: cache expire error: %v", err)
  1489  	} else {
  1490  		fs.Infof(parentCd, "put: cache expired")
  1491  	}
  1492  	// advertise to ChangeNotify
  1493  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1494  
  1495  	return cachedObj, nil
  1496  }
  1497  
// Put uploads data to the remote path with the given modTime and size
  1499  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1500  	fs.Debugf(f, "put data at '%s'", src.Remote())
  1501  	return f.put(ctx, in, src, options, f.Fs.Put)
  1502  }
  1503  
  1504  // PutUnchecked uploads the object
  1505  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1506  	do := f.Fs.Features().PutUnchecked
  1507  	if do == nil {
  1508  		return nil, errors.New("can't PutUnchecked")
  1509  	}
  1510  	fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
  1511  	return f.put(ctx, in, src, options, do)
  1512  }
  1513  
  1514  // PutStream uploads the object
  1515  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1516  	do := f.Fs.Features().PutStream
  1517  	if do == nil {
  1518  		return nil, errors.New("can't PutStream")
  1519  	}
  1520  	fs.Debugf(f, "put data streaming in '%s'", src.Remote())
  1521  	return f.put(ctx, in, src, options, do)
  1522  }
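
// All three entry points above funnel through f.put with a different putFn:
// Put uses the wrapped remote's Put, while PutUnchecked and PutStream use the
// corresponding optional features. A hypothetical caller therefore gets the
// same cache bookkeeping whichever one it picks, e.g.:
//
//	obj, err := cachedFs.Put(ctx, bytes.NewReader(data), srcInfo)
//
// (cachedFs, data and srcInfo are illustrative names, not part of this file.)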
  1523  
  1524  // Copy src to this remote using server side copy operations.
  1525  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1526  	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
  1527  
  1528  	do := f.Fs.Features().Copy
  1529  	if do == nil {
  1530  		fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
  1531  		return nil, fs.ErrorCantCopy
  1532  	}
  1533  	if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
  1534  		return nil, fs.ErrorCantCopy
  1535  	}
  1536  	// the source must be a cached object or we abort
  1537  	srcObj, ok := src.(*Object)
  1538  	if !ok {
		fs.Errorf(src, "can't copy - not same remote type")
  1540  		return nil, fs.ErrorCantCopy
  1541  	}
  1542  	// both the source cache fs and this cache fs need to wrap the same remote
  1543  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
		fs.Errorf(srcObj, "can't copy - not wrapping the same remote")
  1545  		return nil, fs.ErrorCantCopy
  1546  	}
  1547  	// refresh from source or abort
  1548  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1549  		fs.Errorf(f, "can't copy %v - %v", src, err)
  1550  		return nil, fs.ErrorCantCopy
  1551  	}
  1552  
  1553  	if srcObj.isTempFile() {
  1554  		// we check if the feature is still active
  1555  		if f.opt.TempWritePath == "" {
			fs.Errorf(srcObj, "can't copy - this is a locally cached file but the temp write feature is turned off for this run")
  1557  			return nil, fs.ErrorCantCopy
  1558  		}
  1559  
  1560  		do = srcObj.ParentFs.Features().Copy
  1561  		if do == nil {
  1562  			fs.Errorf(src, "parent remote (%v) doesn't support Copy", srcObj.ParentFs)
  1563  			return nil, fs.ErrorCantCopy
  1564  		}
  1565  	}
  1566  
  1567  	obj, err := do(ctx, srcObj.Object, remote)
  1568  	if err != nil {
		fs.Errorf(srcObj, "error copying: %v", err)
  1570  		return nil, err
  1571  	}
  1572  	fs.Debugf(obj, "copy: file copied")
  1573  
  1574  	// persist new
  1575  	co := ObjectFromOriginal(ctx, f, obj).persist()
  1576  	fs.Debugf(co, "copy: added to cache")
  1577  	// expire the destination path
  1578  	parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
  1579  	err = f.cache.ExpireDir(parentCd)
  1580  	if err != nil {
  1581  		fs.Errorf(parentCd, "copy: cache expire error: %v", err)
  1582  	} else {
  1583  		fs.Infof(parentCd, "copy: cache expired")
  1584  	}
  1585  	// advertise to ChangeNotify if wrapped doesn't do that
  1586  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1587  	// expire src parent
  1588  	srcParent := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1589  	err = f.cache.ExpireDir(srcParent)
  1590  	if err != nil {
  1591  		fs.Errorf(srcParent, "copy: cache expire error: %v", err)
  1592  	} else {
  1593  		fs.Infof(srcParent, "copy: cache expired")
  1594  	}
  1595  	// advertise to ChangeNotify if wrapped doesn't do that
  1596  	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)
  1597  
  1598  	return co, nil
  1599  }
  1600  
  1601  // Move src to this remote using server side move operations.
  1602  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	fs.Debugf(f, "moving obj '%s' -> '%s'", src, remote)
  1604  
  1605  	// if source fs doesn't support move abort
  1606  	do := f.Fs.Features().Move
  1607  	if do == nil {
  1608  		fs.Errorf(src, "source remote (%v) doesn't support Move", src.Fs())
  1609  		return nil, fs.ErrorCantMove
  1610  	}
  1611  	// the source must be a cached object or we abort
  1612  	srcObj, ok := src.(*Object)
  1613  	if !ok {
		fs.Errorf(src, "can't move - not same remote type")
  1615  		return nil, fs.ErrorCantMove
  1616  	}
  1617  	// both the source cache fs and this cache fs need to wrap the same remote
  1618  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
		fs.Errorf(srcObj, "can't move - not wrapping the same remote")
  1620  		return nil, fs.ErrorCantMove
  1621  	}
  1622  	// refresh from source or abort
  1623  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1624  		fs.Errorf(f, "can't move %v - %v", src, err)
  1625  		return nil, fs.ErrorCantMove
  1626  	}
  1627  
  1628  	// if this is a temp object then we perform the changes locally
  1629  	if srcObj.isTempFile() {
  1630  		// we check if the feature is still active
  1631  		if f.opt.TempWritePath == "" {
			fs.Errorf(srcObj, "can't move - this is a locally cached file but the temp write feature is turned off for this run")
  1633  			return nil, fs.ErrorCantMove
  1634  		}
  1635  		// pause background uploads
  1636  		f.backgroundRunner.pause()
  1637  		defer f.backgroundRunner.play()
  1638  
  1639  		// started uploads can't be moved until they complete
  1640  		if srcObj.tempFileStartedUpload() {
			fs.Errorf(srcObj, "can't move - upload has already started and must finish first")
  1642  			return nil, fs.ErrorCantMove
  1643  		}
  1644  		do = f.tempFs.Features().Move
  1645  
  1646  		// we must also update the pending queue
  1647  		err := f.cache.updatePendingUpload(srcObj.abs(), func(item *tempUploadInfo) error {
  1648  			item.DestPath = path.Join(f.Root(), remote)
  1649  			item.AddedOn = time.Now()
  1650  			return nil
  1651  		})
  1652  		if err != nil {
  1653  			fs.Errorf(srcObj, "failed to rename queued file for upload: %v", err)
  1654  			return nil, fs.ErrorCantMove
  1655  		}
  1656  		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
  1657  	}
  1658  
  1659  	obj, err := do(ctx, srcObj.Object, remote)
  1660  	if err != nil {
  1661  		fs.Errorf(srcObj, "error moving: %v", err)
  1662  		return nil, err
  1663  	}
  1664  	fs.Debugf(obj, "move: file moved")
  1665  
  1666  	// remove old
  1667  	err = f.cache.RemoveObject(srcObj.abs())
  1668  	if err != nil {
  1669  		fs.Errorf(srcObj, "move: remove error: %v", err)
  1670  	} else {
  1671  		fs.Debugf(srcObj, "move: removed from cache")
  1672  	}
  1673  	// expire old parent
  1674  	parentCd := NewDirectory(f, cleanPath(path.Dir(srcObj.Remote())))
  1675  	err = f.cache.ExpireDir(parentCd)
  1676  	if err != nil {
  1677  		fs.Errorf(parentCd, "move: parent cache expire error: %v", err)
  1678  	} else {
  1679  		fs.Infof(parentCd, "move: cache expired")
  1680  	}
  1681  	// advertise to ChangeNotify if wrapped doesn't do that
  1682  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1683  	// persist new
  1684  	cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
  1685  	fs.Debugf(cachedObj, "move: added to cache")
  1686  	// expire new parent
  1687  	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1688  	err = f.cache.ExpireDir(parentCd)
  1689  	if err != nil {
  1690  		fs.Errorf(parentCd, "move: expire error: %v", err)
  1691  	} else {
  1692  		fs.Infof(parentCd, "move: cache expired")
  1693  	}
  1694  	// advertise to ChangeNotify if wrapped doesn't do that
  1695  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1696  
  1697  	return cachedObj, nil
  1698  }
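
// Copy and Move above repeat the same expire-parent-and-notify sequence for
// both the source and destination directories. A minimal sketch of a helper
// that would factor it out (hypothetical, not part of the backend):
func (f *Fs) exampleExpireAndNotify(remote string) {
	parentCd := NewDirectory(f, cleanPath(path.Dir(remote)))
	if err := f.cache.ExpireDir(parentCd); err != nil {
		fs.Errorf(parentCd, "cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
}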
  1699  
  1700  // Hashes returns the supported hash sets.
  1701  func (f *Fs) Hashes() hash.Set {
  1702  	return f.Fs.Hashes()
  1703  }
  1704  
  1705  // Purge all files in the root and the root directory
  1706  func (f *Fs) Purge(ctx context.Context) error {
  1707  	fs.Infof(f, "purging cache")
  1708  	f.cache.Purge()
  1709  
  1710  	do := f.Fs.Features().Purge
  1711  	if do == nil {
  1712  		return nil
  1713  	}
  1714  
	return do(ctx)
  1721  }
  1722  
  1723  // CleanUp the trash in the Fs
  1724  func (f *Fs) CleanUp(ctx context.Context) error {
  1725  	f.CleanUpCache(false)
  1726  
  1727  	do := f.Fs.Features().CleanUp
  1728  	if do == nil {
  1729  		return nil
  1730  	}
  1731  
  1732  	return do(ctx)
  1733  }
  1734  
  1735  // About gets quota information from the Fs
  1736  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1737  	do := f.Fs.Features().About
  1738  	if do == nil {
  1739  		return nil, errors.New("About not supported")
  1740  	}
  1741  	return do(ctx)
  1742  }
  1743  
  1744  // Stats returns stats about the cache storage
  1745  func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
  1746  	return f.cache.Stats()
  1747  }
  1748  
// openRateLimited executes a closure once the shared rate limiter allows it
  1750  func (f *Fs) openRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
  1751  	var err error
  1752  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
  1753  	defer cancel()
  1754  	start := time.Now()
  1755  
  1756  	if err = f.rateLimiter.Wait(ctx); err != nil {
  1757  		return nil, err
  1758  	}
  1759  
  1760  	elapsed := time.Since(start)
  1761  	if elapsed > time.Second*2 {
  1762  		fs.Debugf(f, "rate limited: %s", elapsed)
  1763  	}
  1764  	return fn()
  1765  }
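
// A hypothetical call site: the closure runs only after the shared limiter
// grants a slot (bounded by the 10s timeout above), so concurrent opens
// queue up instead of hammering the source FS:
//
//	rc, err := f.openRateLimited(func() (io.ReadCloser, error) {
//		return o.Object.Open(ctx)
//	})
//
// (o is an illustrative *Object.)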
  1766  
// CleanUpCache will clean up only the expired cache data
  1768  func (f *Fs) CleanUpCache(ignoreLastTs bool) {
  1769  	f.cleanupMu.Lock()
  1770  	defer f.cleanupMu.Unlock()
  1771  
  1772  	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
  1773  		f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
  1774  		f.lastChunkCleanup = time.Now()
  1775  	}
  1776  }
  1777  
// StopBackgroundRunners will signal all the runners to stop their work
  1779  // can be triggered from a terminate signal or from testing between runs
  1780  func (f *Fs) StopBackgroundRunners() {
  1781  	f.cleanupChan <- false
  1782  	if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
  1783  		f.backgroundRunner.close()
  1784  	}
  1785  	f.cache.Close()
  1786  	fs.Debugf(f, "Services stopped")
  1787  }
  1788  
  1789  // UnWrap returns the Fs that this Fs is wrapping
  1790  func (f *Fs) UnWrap() fs.Fs {
  1791  	return f.Fs
  1792  }
  1793  
  1794  // WrapFs returns the Fs that is wrapping this Fs
  1795  func (f *Fs) WrapFs() fs.Fs {
  1796  	return f.wrapper
  1797  }
  1798  
  1799  // SetWrapper sets the Fs that is wrapping this Fs
  1800  func (f *Fs) SetWrapper(wrapper fs.Fs) {
  1801  	f.wrapper = wrapper
  1802  }
  1803  
  1804  // isWrappedByCrypt checks if this is wrapped by a crypt remote
  1805  func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
  1806  	if f.wrapper == nil {
  1807  		return nil, false
  1808  	}
  1809  	c, ok := f.wrapper.(*crypt.Fs)
  1810  	return c, ok
  1811  }
  1812  
  1813  // cleanRootFromPath trims the root of the current fs from a path
  1814  func (f *Fs) cleanRootFromPath(p string) string {
  1815  	if f.Root() != "" {
  1816  		p = p[len(f.Root()):] // trim out root
  1817  		if len(p) > 0 {       // remove first separator
  1818  			p = p[1:]
  1819  		}
  1820  	}
  1821  
  1822  	return p
  1823  }
  1824  
  1825  func (f *Fs) isRootInPath(p string) bool {
  1826  	if f.Root() == "" {
  1827  		return true
  1828  	}
  1829  	return strings.HasPrefix(p, f.Root()+"/")
  1830  }
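
// For example, with f.Root() == "bucket/dir":
//
//	f.isRootInPath("bucket/dir/file.txt")      // true
//	f.cleanRootFromPath("bucket/dir/file.txt") // "file.txt"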
  1831  
  1832  // DirCacheFlush flushes the dir cache
  1833  func (f *Fs) DirCacheFlush() {
  1834  	_ = f.cache.RemoveDir("")
  1835  }
  1836  
// GetBackgroundUploadChannel returns a channel that can be listened to for upload
// activity that happens in the background
  1839  func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
  1840  	if f.opt.TempWritePath != "" {
  1841  		return f.backgroundRunner.notifyCh
  1842  	}
  1843  	return nil
  1844  }
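
// A hypothetical listener sketch (the fields of BackgroundUploadState are
// defined elsewhere in this package; this only shows the channel handling):
//
//	if ch := f.GetBackgroundUploadChannel(); ch != nil {
//		go func() {
//			for state := range ch {
//				fs.Debugf(f, "background upload state: %+v", state)
//			}
//		}()
//	}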
  1845  
  1846  func (f *Fs) isNotifiedRemote(remote string) bool {
  1847  	f.notifiedMu.Lock()
  1848  	defer f.notifiedMu.Unlock()
  1849  
  1850  	n, ok := f.notifiedRemotes[remote]
  1851  	if !ok || !n {
  1852  		return false
  1853  	}
  1854  
  1855  	delete(f.notifiedRemotes, remote)
  1856  	return n
  1857  }
  1858  
  1859  func cleanPath(p string) string {
  1860  	p = path.Clean(p)
  1861  	if p == "." || p == "/" {
  1862  		p = ""
  1863  	}
  1864  
  1865  	return p
  1866  }
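
// For example:
//
//	cleanPath("a/b/../c") // "a/c"
//	cleanPath(".")        // ""
//	cleanPath("/")        // ""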
  1867  
  1868  // UserInfo returns info about the connected user
  1869  func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
  1870  	do := f.Fs.Features().UserInfo
  1871  	if do == nil {
  1872  		return nil, fs.ErrorNotImplemented
  1873  	}
  1874  	return do(ctx)
  1875  }
  1876  
  1877  // Disconnect the current user
  1878  func (f *Fs) Disconnect(ctx context.Context) error {
  1879  	do := f.Fs.Features().Disconnect
  1880  	if do == nil {
  1881  		return fs.ErrorNotImplemented
  1882  	}
  1883  	return do(ctx)
  1884  }
  1885  
  1886  var commandHelp = []fs.CommandHelp{
  1887  	{
  1888  		Name:  "stats",
  1889  		Short: "Print stats on the cache backend in JSON format.",
  1890  	},
  1891  }
  1892  
  1893  // Command the backend to run a named command
  1894  //
  1895  // The command run is name
// arg may be used to read arguments from
// opt may be used to read optional arguments from
  1898  //
  1899  // The result should be capable of being JSON encoded
  1900  // If it is a string or a []string it will be shown to the user
  1901  // otherwise it will be JSON encoded and shown to the user like that
  1902  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
  1903  	switch name {
  1904  	case "stats":
  1905  		return f.Stats()
  1906  	default:
  1907  		return nil, fs.ErrorCommandNotFound
  1908  	}
  1909  }
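
// For example, assuming a configured cache remote named "mycache:", the
// stats command can be invoked from the command line with:
//
//	rclone backend stats mycache: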
  1910  
  1911  // Check the interfaces are satisfied
  1912  var (
  1913  	_ fs.Fs             = (*Fs)(nil)
  1914  	_ fs.Purger         = (*Fs)(nil)
  1915  	_ fs.Copier         = (*Fs)(nil)
  1916  	_ fs.Mover          = (*Fs)(nil)
  1917  	_ fs.DirMover       = (*Fs)(nil)
  1918  	_ fs.PutUncheckeder = (*Fs)(nil)
  1919  	_ fs.PutStreamer    = (*Fs)(nil)
  1920  	_ fs.CleanUpper     = (*Fs)(nil)
  1921  	_ fs.UnWrapper      = (*Fs)(nil)
  1922  	_ fs.Wrapper        = (*Fs)(nil)
  1923  	_ fs.ListRer        = (*Fs)(nil)
  1924  	_ fs.ChangeNotifier = (*Fs)(nil)
  1925  	_ fs.Abouter        = (*Fs)(nil)
  1926  	_ fs.UserInfoer     = (*Fs)(nil)
  1927  	_ fs.Disconnecter   = (*Fs)(nil)
  1928  	_ fs.Commander      = (*Fs)(nil)
  1929  )